bitkeeper revision 1.85 (3e56416dqUx5ejRHgvOG3eSEByO2Fg)
author: kaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>
Fri, 21 Feb 2003 15:10:37 +0000 (15:10 +0000)
committer: kaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>
Fri, 21 Feb 2003 15:10:37 +0000 (15:10 +0000)
Many files:
  new file
  Sync of SMH's new SCSI world with latest repository.

35 files changed:
.rootkeys
xen-2.4.16/Rules.mk
xen-2.4.16/drivers/Makefile
xen-2.4.16/drivers/scsi/Makefile
xen-2.4.16/drivers/scsi/aacraid/Makefile [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/README [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/TODO [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/aachba.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/aacraid.h [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/commctrl.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/comminit.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/commsup.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/dpcsup.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/linit.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/rx.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/aacraid/sa.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/constants.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/hosts.c [new file with mode: 0644]
xen-2.4.16/drivers/scsi/hosts.h
xen-2.4.16/drivers/scsi/scsi.c
xen-2.4.16/drivers/scsi/scsi.h
xen-2.4.16/drivers/scsi/scsi_dma.c
xen-2.4.16/drivers/scsi/scsi_error.c
xen-2.4.16/drivers/scsi/scsi_ioctl.c
xen-2.4.16/drivers/scsi/scsi_lib.c
xen-2.4.16/drivers/scsi/scsi_merge.c
xen-2.4.16/drivers/scsi/scsi_module.c.inc [new file with mode: 0644]
xen-2.4.16/drivers/scsi/scsi_proc.c
xen-2.4.16/drivers/scsi/scsi_queue.c
xen-2.4.16/drivers/scsi/scsi_scan.c
xen-2.4.16/drivers/scsi/scsi_syms.c
xen-2.4.16/drivers/scsi/scsicam.c
xen-2.4.16/drivers/scsi/sd.c
xen-2.4.16/include/asm-i386/dma.h [new file with mode: 0644]
xen-2.4.16/include/xeno/interrupt.h

index 808942b2bf6ad14de46c19153fbe8937432de3a1..e56a578c7d42f0162331524cf3cef40abb75da8a 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3ddb79bfJaf0bkE1Y67bnll8-kjEPg xen-2.4.16/drivers/pci/setup-res.c
 3ddb79bfIcCWJsBDNcQQE3ok2Azn-Q xen-2.4.16/drivers/pci/syscall.c
 3ddb79be3kwzyKagpMHGoXZFdan7dg xen-2.4.16/drivers/scsi/Makefile
+3e564137sVLmo7rTnKLNzLSCvuUz8g xen-2.4.16/drivers/scsi/aacraid/Makefile
+3e5641379FXvLDjV-0OrRNOBTwL_Lw xen-2.4.16/drivers/scsi/aacraid/README
+3e564137MAHzHSRYoWGNmIferNjFVA xen-2.4.16/drivers/scsi/aacraid/TODO
+3e564137QLUGVlcwTpnMYATKahDmCg xen-2.4.16/drivers/scsi/aacraid/aachba.c
+3e564137Tm_t6rMYyxFtnY9yo_abMg xen-2.4.16/drivers/scsi/aacraid/aacraid.h
+3e564137AtfQtRJZiS9B00To0AmKSg xen-2.4.16/drivers/scsi/aacraid/commctrl.c
+3e564137a9aOGMOFgQ5CYQJBOD3DvQ xen-2.4.16/drivers/scsi/aacraid/comminit.c
+3e564137fw_bPuKcvVi0qwntc_RB-Q xen-2.4.16/drivers/scsi/aacraid/commsup.c
+3e564137iEHmY_e6xGZ7TLsjy5yS-g xen-2.4.16/drivers/scsi/aacraid/dpcsup.c
+3e564137taxQbEVa39h3mEVLFxRydQ xen-2.4.16/drivers/scsi/aacraid/linit.c
+3e564137vmALxpfK7vNINqEklSmQ1A xen-2.4.16/drivers/scsi/aacraid/rx.c
+3e564137EuYUJgvqnOunPERWxnp_mw xen-2.4.16/drivers/scsi/aacraid/sa.c
+3e56411aOLzeD5CRbuXquOrX0lRijw xen-2.4.16/drivers/scsi/constants.c
 3ddb79beXZxwKh7cGyPfr40bhDyRrA xen-2.4.16/drivers/scsi/constants.h
+3e564120ZeinH9nf3IVSerB80T7dHg xen-2.4.16/drivers/scsi/hosts.c
 3ddb79beGiGljlTNq_kRnCBZECgC9Q xen-2.4.16/drivers/scsi/hosts.h
 3ddb79bexarQo1tQ541PPUyK9HXNDA xen-2.4.16/drivers/scsi/scsi.c
 3ddb79beBOiYxQUiWTHosepRlJyuGA xen-2.4.16/drivers/scsi/scsi.h
 3ddb79berPStE_-ILQHgcl1BLDLywA xen-2.4.16/drivers/scsi/scsi_lib.c
 3ddb79beRXjB7_nNUbJMIRyjDmeByQ xen-2.4.16/drivers/scsi/scsi_merge.c
 3ddb79beGNb7Es1bATZAGsPZEu5F2Q xen-2.4.16/drivers/scsi/scsi_module.c
+3e56412a_O2cnz-e36volrKvofGe-Q xen-2.4.16/drivers/scsi/scsi_module.c.inc
 3ddb79beZ--AZB0twliIm3qmQJO8Zg xen-2.4.16/drivers/scsi/scsi_obsolete.c
 3ddb79beQgG_st0eBZUX8AQI7kBkHA xen-2.4.16/drivers/scsi/scsi_obsolete.h
 3ddb79beK65cNRldY0CFGXjZ3-A74Q xen-2.4.16/drivers/scsi/scsi_proc.c
 3ddb79c2jFkPAZTDmU35L6IUssYMgQ xen-2.4.16/include/asm-i386/debugreg.h
 3ddb79c3r9-31dIsewPV3P3i8HALsQ xen-2.4.16/include/asm-i386/delay.h
 3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen-2.4.16/include/asm-i386/desc.h
+3e564149UkU91RX7onzpCAmbj_IFjw xen-2.4.16/include/asm-i386/dma.h
 3e20b82fl1jmQiKdLy7fxMcutfpjWA xen-2.4.16/include/asm-i386/domain_page.h
 3ddb79c2O729EttZTYu1c8LcsUO_GQ xen-2.4.16/include/asm-i386/elf.h
 3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen-2.4.16/include/asm-i386/fixmap.h
index 33fb3d314bf7e07e76fc2fd5e84d2c3a5e5e0398..13a57ed550fa1bc067c3ce387154046cd8d9ed1b 100644 (file)
@@ -20,6 +20,7 @@ ALL_OBJS += $(BASEDIR)/drivers/pci/driver.o
 ALL_OBJS += $(BASEDIR)/drivers/net/driver.o
 ALL_OBJS += $(BASEDIR)/drivers/block/driver.o
 ALL_OBJS += $(BASEDIR)/drivers/ide/driver.o
+#ALL_OBJS += $(BASEDIR)/drivers/scsi/driver.o
 ALL_OBJS += $(BASEDIR)/arch/$(ARCH)/arch.o
 
 HOSTCC     = gcc
index bee17fa208de0f9e903d3002e68dd9c82ded80cc..4aa76a3f25975eecba509913842787922cb8db15 100644 (file)
@@ -5,7 +5,7 @@ default:
        $(MAKE) -C net
        $(MAKE) -C block
        $(MAKE) -C ide
-#      $(MAKE) -C scsi
+       $(MAKE) -C scsi
 
 clean:
        $(MAKE) -C char clean
@@ -13,4 +13,4 @@ clean:
        $(MAKE) -C net clean
        $(MAKE) -C block clean
        $(MAKE) -C ide clean
-#      $(MAKE) -C scsi clean
+       $(MAKE) -C scsi clean
index 574b7d2d79301e69e3ba0129527eb5a328d4da82..5b480bdf539b90df221addfe619efaed72229cef 100644 (file)
@@ -2,7 +2,10 @@
 include $(BASEDIR)/Rules.mk
 
 default: $(OBJS)
-       $(LD) -r -o driver.o $(OBJS)
+       $(MAKE) -C aacraid
+       $(LD) -r -o driver.o $(OBJS) aacraid/aacraid.o
+#      $(LD) -r -o driver.o $(OBJS) 
 
 clean:
+       $(MAKE) -C aacraid clean
        rm -f *.o *~ core
diff --git a/xen-2.4.16/drivers/scsi/aacraid/Makefile b/xen-2.4.16/drivers/scsi/aacraid/Makefile
new file mode 100644 (file)
index 0000000..7d802c3
--- /dev/null
@@ -0,0 +1,17 @@
+
+include $(BASEDIR)/Rules.mk
+
+CFLAGS += -I$(BASEDIR)/drivers/scsi
+
+
+# -y           := linit.o aachba.o commctrl.o comminit.o commsup.o \
+#                 dpcsup.o rx.o sa.o
+
+default: $(OBJS)
+       $(LD) -r -o aacraid.o $(OBJS)
+
+clean:
+       rm -f *.o *~ core
+
+
+
diff --git a/xen-2.4.16/drivers/scsi/aacraid/README b/xen-2.4.16/drivers/scsi/aacraid/README
new file mode 100644 (file)
index 0000000..9f73c67
--- /dev/null
@@ -0,0 +1,42 @@
+AACRAID Driver for Linux (take two)
+
+Introduction
+-------------------------
+The aacraid driver adds support for Adaptec (http://www.adaptec.com)
+RAID controllers. This is a major rewrite from the original 
+Adaptec supplied driver. It has significantly cleaned up both the code
+and the running binary size (the module is less than half the size of
+the original).
+
+Supported Cards/Chipsets
+-------------------------
+       Dell Computer Corporation PERC 2 Quad Channel
+       Dell Computer Corporation PERC 2/Si
+       Dell Computer Corporation PERC 3/Si
+       Dell Computer Corporation PERC 3/Di
+       HP NetRAID-4M
+       ADAPTEC 2120S
+       ADAPTEC 2200S
+       ADAPTEC 5400S
+
+People
+-------------------------
+Alan Cox <alan@redhat.com>
+Christoph Hellwig <hch@infradead.org>  (small cleanups/fixes)
+Matt Domsch <matt_domsch@dell.com>     (revision ioctl, adapter messages)
+Deanna Bonds <deanna_bonds@adaptec.com> (non-DASD support, PAE fibs and 64 bit, added new adaptec controllers
+                                        added new ioctls, changed scsi interface to use new error handler,
+                                        increased the number of fibs and outstanding commands to a container)
+
+Original Driver
+-------------------------
+Adaptec Unix OEM Product Group
+
+Mailing List
+-------------------------
+None currently. Also note this is very different from Brian's original driver
+so don't expect him to support it.
+Adaptec does support this driver.  Contact either tech support or Deanna Bonds.
+
+Original by Brian Boerner February 2001
+Rewritten by Alan Cox, November 2001
diff --git a/xen-2.4.16/drivers/scsi/aacraid/TODO b/xen-2.4.16/drivers/scsi/aacraid/TODO
new file mode 100644 (file)
index 0000000..6f71022
--- /dev/null
@@ -0,0 +1,4 @@
+o      Testing
+o      More testing
+o      Feature request: display the firmware/bios/etc revisions in the
+       /proc info
diff --git a/xen-2.4.16/drivers/scsi/aacraid/aachba.c b/xen-2.4.16/drivers/scsi/aacraid/aachba.c
new file mode 100644 (file)
index 0000000..21fc425
--- /dev/null
@@ -0,0 +1,1685 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <xeno/config.h>
+/*  #include <xeno/kernel.h> */
+#include <xeno/init.h>
+#include <xeno/sched.h>
+#include <xeno/pci.h>
+/*  #include <xeno/spinlock.h> */
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/completion.h> */
+/*  #include <asm/semaphore.h> */
+#include <asm/uaccess.h>
+#define MAJOR_NR SCSI_DISK0_MAJOR      /* For DEVICE_NR() */
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "aacraid.h"
+
+/*     SCSI Commands */
+/*     TODO:  dmb - use the ones defined in include/scsi/scsi.h */
+
+#define        SS_TEST                 0x00    /* Test unit ready */
+#define SS_REZERO              0x01    /* Rezero unit */
+#define        SS_REQSEN               0x03    /* Request Sense */
+#define SS_REASGN              0x07    /* Reassign blocks */
+#define        SS_READ                 0x08    /* Read 6   */
+#define        SS_WRITE                0x0A    /* Write 6  */
+#define        SS_INQUIR               0x12    /* inquiry */
+#define        SS_ST_SP                0x1B    /* Start/Stop unit */
+#define        SS_LOCK                 0x1E    /* prevent/allow medium removal */
+#define SS_RESERV              0x16    /* Reserve */
+#define SS_RELES               0x17    /* Release */
+#define SS_MODESEN             0x1A    /* Mode Sense 6 */
+#define        SS_RDCAP                0x25    /* Read Capacity */
+#define        SM_READ                 0x28    /* Read 10  */
+#define        SM_WRITE                0x2A    /* Write 10 */
+#define SS_SEEK                        0x2B    /* Seek */
+
+/* values for inqd_pdt: Peripheral device type in plain English */
+#define        INQD_PDT_DA     0x00            /* Direct-access (DISK) device */
+#define        INQD_PDT_PROC   0x03            /* Processor device */
+#define        INQD_PDT_CHNGR  0x08            /* Changer (jukebox, scsi2) */
+#define        INQD_PDT_COMM   0x09            /* Communication device (scsi2) */
+#define        INQD_PDT_NOLUN2 0x1f            /* Unknown Device (scsi2) */
+#define        INQD_PDT_NOLUN  0x7f            /* Logical Unit Not Present */
+
+#define        INQD_PDT_DMASK  0x1F            /* Peripheral Device Type Mask */
+#define        INQD_PDT_QMASK  0xE0            /* Peripheral Device Qualifier Mask */
+
+#define        TARGET_LUN_TO_CONTAINER(target, lun)    (target)
+#define CONTAINER_TO_TARGET(cont)              ((cont))
+#define CONTAINER_TO_LUN(cont)                 (0)
+
+#define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))
+
+#define MAX_DRIVER_SG_SEGMENT_COUNT 17
+
+/*
+ *     Sense keys
+ */
+#define SENKEY_NO_SENSE                        0x00    
+#define SENKEY_UNDEFINED                       0x01    
+#define SENKEY_NOT_READY                       0x02    
+#define SENKEY_MEDIUM_ERR                      0x03    
+#define SENKEY_HW_ERR                          0x04    
+#define SENKEY_ILLEGAL                         0x05    
+#define SENKEY_ATTENTION                       0x06    
+#define SENKEY_PROTECTED                       0x07    
+#define SENKEY_BLANK                           0x08    
+#define SENKEY_V_UNIQUE                        0x09    
+#define SENKEY_CPY_ABORT                       0x0A    
+#define SENKEY_ABORT                           0x0B    
+#define SENKEY_EQUAL                           0x0C    
+#define SENKEY_VOL_OVERFLOW                    0x0D    
+#define SENKEY_MISCOMP                         0x0E    
+#define SENKEY_RESERVED                        0x0F    
+
+/*
+ *     Sense codes
+ */
+#define SENCODE_NO_SENSE                        0x00
+#define SENCODE_END_OF_DATA                     0x00
+#define SENCODE_BECOMING_READY                  0x04
+#define SENCODE_INIT_CMD_REQUIRED               0x04
+#define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
+#define SENCODE_INVALID_COMMAND                 0x20
+#define SENCODE_LBA_OUT_OF_RANGE                0x21
+#define SENCODE_INVALID_CDB_FIELD               0x24
+#define SENCODE_LUN_NOT_SUPPORTED               0x25
+#define SENCODE_INVALID_PARAM_FIELD             0x26
+#define SENCODE_PARAM_NOT_SUPPORTED             0x26
+#define SENCODE_PARAM_VALUE_INVALID             0x26
+#define SENCODE_RESET_OCCURRED                  0x29
+#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
+#define SENCODE_INQUIRY_DATA_CHANGED            0x3F
+#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
+#define SENCODE_DIAGNOSTIC_FAILURE              0x40
+#define SENCODE_INTERNAL_TARGET_FAILURE         0x44
+#define SENCODE_INVALID_MESSAGE_ERROR           0x49
+#define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
+#define SENCODE_OVERLAPPED_COMMAND              0x4E
+
+/*
+ *     Additional sense codes
+ */
+#define ASENCODE_NO_SENSE                       0x00
+#define ASENCODE_END_OF_DATA                    0x05
+#define ASENCODE_BECOMING_READY                 0x01
+#define ASENCODE_INIT_CMD_REQUIRED              0x02
+#define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
+#define ASENCODE_INVALID_COMMAND                0x00
+#define ASENCODE_LBA_OUT_OF_RANGE               0x00
+#define ASENCODE_INVALID_CDB_FIELD              0x00
+#define ASENCODE_LUN_NOT_SUPPORTED              0x00
+#define ASENCODE_INVALID_PARAM_FIELD            0x00
+#define ASENCODE_PARAM_NOT_SUPPORTED            0x01
+#define ASENCODE_PARAM_VALUE_INVALID            0x02
+#define ASENCODE_RESET_OCCURRED                 0x00
+#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
+#define ASENCODE_INQUIRY_DATA_CHANGED           0x03
+#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
+#define ASENCODE_DIAGNOSTIC_FAILURE             0x80
+#define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
+#define ASENCODE_INVALID_MESSAGE_ERROR          0x00
+#define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
+#define ASENCODE_OVERLAPPED_COMMAND             0x00
+
+#define BYTE0(x) (unsigned char)(x)
+#define BYTE1(x) (unsigned char)((x) >> 8)
+#define BYTE2(x) (unsigned char)((x) >> 16)
+#define BYTE3(x) (unsigned char)((x) >> 24)
+
+/*------------------------------------------------------------------------------
+ *              S T R U C T S / T Y P E D E F S
+ *----------------------------------------------------------------------------*/
+/* SCSI inquiry data */
+struct inquiry_data {
+       u8 inqd_pdt;            /* Peripheral qualifier | Peripheral Device Type  */
+       u8 inqd_dtq;            /* RMB | Device Type Qualifier  */
+       u8 inqd_ver;            /* ISO version | ECMA version | ANSI-approved version */
+       u8 inqd_rdf;            /* AENC | TrmIOP | Response data format */
+       u8 inqd_len;            /* Additional length (n-4) */
+       u8 inqd_pad1[2];        /* Reserved - must be zero */
+       u8 inqd_pad2;           /* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
+       u8 inqd_vid[8];         /* Vendor ID */
+       u8 inqd_pid[16];        /* Product ID */
+       u8 inqd_prl[4];         /* Product Revision Level */
+};
+
+struct sense_data {
+       u8 error_code;          /* 70h (current errors), 71h(deferred errors) */
+       u8 valid:1;             /* A valid bit of one indicates that the information  */
+       /* field contains valid information as defined in the
+        * SCSI-2 Standard.
+        */
+       u8 segment_number;      /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
+       u8 sense_key:4;         /* Sense Key */
+       u8 reserved:1;
+       u8 ILI:1;               /* Incorrect Length Indicator */
+       u8 EOM:1;               /* End Of Medium - reserved for random access devices */
+       u8 filemark:1;          /* Filemark - reserved for random access devices */
+
+       u8 information[4];      /* for direct-access devices, contains the unsigned 
+                                * logical block address or residue associated with 
+                                * the sense key 
+                                */
+       u8 add_sense_len;       /* number of additional sense bytes to follow this field */
+       u8 cmnd_info[4];        /* not used */
+       u8 ASC;                 /* Additional Sense Code */
+       u8 ASCQ;                /* Additional Sense Code Qualifier */
+       u8 FRUC;                /* Field Replaceable Unit Code - not used */
+       u8 bit_ptr:3;           /* indicates which byte of the CDB or parameter data
+                                * was in error
+                                */
+       u8 BPV:1;               /* bit pointer valid (BPV): 1- indicates that 
+                                * the bit_ptr field has valid value
+                                */
+       u8 reserved2:2;
+       u8 CD:1;                /* command data bit: 1- illegal parameter in CDB.
+                                * 0- illegal parameter in data.
+                                */
+       u8 SKSV:1;
+       u8 field_ptr[2];        /* byte of the CDB or parameter data in error */
+};
+
+/*
+ *              M O D U L E   G L O B A L S
+ */
+static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS]; /*  SCSI Device 
+                                                              Instance Ptrs */
+static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
+static void get_sd_devname(int disknum, char *buffer);
+static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* sgmap);
+static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg);
+static int aac_send_srb_fib(Scsi_Cmnd* scsicmd);
+#ifdef AAC_DETAILED_STATUS_INFO
+static char *aac_get_status_string(u32 status);
+#endif
+
+/**
+ *     aac_get_containers      -       list containers
+ *     @common: adapter to probe
+ *
+ *     Make a list of all containers on this controller
+ */
+int aac_get_containers(struct aac_dev *dev)
+{
+    struct fsa_scsi_hba *fsa_dev_ptr;
+    u32 index, status = 0;
+    struct aac_query_mount *dinfo;
+    struct aac_mount *dresp;
+    struct fib * fibptr;
+    unsigned instance;
+    
+    fsa_dev_ptr = &(dev->fsa_dev);
+    instance = dev->scsi_host_ptr->unique_id;
+    
+    if (!(fibptr = fib_alloc(dev)))
+       return -ENOMEM;
+    
+    for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
+       fib_init(fibptr);
+       dinfo = (struct aac_query_mount *) fib_data(fibptr);
+       
+       dinfo->command = cpu_to_le32(VM_NameServe);
+       dinfo->count = cpu_to_le32(index);
+       dinfo->type = cpu_to_le32(FT_FILESYS);
+
+       printk("aac_get_container: getting info for container %d\n", index); 
+       status = fib_send(ContainerCommand,
+                         fibptr,
+                         sizeof (struct aac_query_mount),
+                         FsaNormal,
+                         1, 1,
+                         NULL, NULL);
+       if (status < 0 ) {
+           printk(KERN_WARNING "ProbeContainers: SendFIB failed.\n");
+           break;
+       }
+       dresp = (struct aac_mount *)fib_data(fibptr);
+       
+       if ((le32_to_cpu(dresp->status) == ST_OK) &&
+           (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+           fsa_dev_ptr->valid[index] = 1;
+           fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
+           fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
+           if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
+               fsa_dev_ptr->ro[index] = 1;
+       }
+       fib_complete(fibptr);
+       /*
+        *      If there are no more containers, then stop asking.
+        */
+       if ((index + 1) >= le32_to_cpu(dresp->count))
+           break;
+    }
+    fib_free(fibptr);
+    fsa_dev[instance] = fsa_dev_ptr;
+    return status;
+}
+
+/**
+ *     probe_container         -       query a logical volume
+ *     @dev: device to query
+ *     @cid: container identifier
+ *
+ *     Queries the controller about the given volume. The volume information
+ *     is updated in the struct fsa_scsi_hba structure rather than returned.
+ */
+static int probe_container(struct aac_dev *dev, int cid)
+{
+    struct fsa_scsi_hba *fsa_dev_ptr;
+    int status;
+    struct aac_query_mount *dinfo;
+    struct aac_mount *dresp;
+    struct fib * fibptr;
+    unsigned instance;
+    
+    fsa_dev_ptr = &(dev->fsa_dev);
+    instance = dev->scsi_host_ptr->unique_id;
+    
+    if (!(fibptr = fib_alloc(dev)))
+       return -ENOMEM;
+    
+    fib_init(fibptr);
+    
+    dinfo = (struct aac_query_mount *)fib_data(fibptr);
+    
+    dinfo->command = cpu_to_le32(VM_NameServe);
+    dinfo->count = cpu_to_le32(cid);
+    dinfo->type = cpu_to_le32(FT_FILESYS);
+    
+    status = fib_send(ContainerCommand,
+                     fibptr,
+                     sizeof(struct aac_query_mount),
+                     FsaNormal,
+                     1, 1,
+                     NULL, NULL);
+    if (status < 0) {
+       printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
+       goto error;
+    }
+    
+    dresp = (struct aac_mount *) fib_data(fibptr);
+    
+    if ((le32_to_cpu(dresp->status) == ST_OK) &&
+       (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+       fsa_dev_ptr->valid[cid] = 1;
+       fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
+       fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
+       if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
+           fsa_dev_ptr->ro[cid] = 1;
+    }
+    
+ error:
+    fib_complete(fibptr);
+    fib_free(fibptr);
+    
+    return status;
+}
+
+/* Local Structure to set SCSI inquiry data strings */
+struct scsi_inq {
+       char vid[8];         /* Vendor ID */
+       char pid[16];        /* Product ID */
+       char prl[4];         /* Product Revision Level */
+};
+
+/**
+ *     InqStrCopy      -       string merge
+ *     @a:     string to copy from
+ *     @b:     string to copy to
+ *
+ *     Copy a String from one location to another
+ *     without copying \0
+ */
+
+static void inqstrcpy(char *a, char *b)
+{
+
+       while(*a != (char)0) 
+               *b++ = *a++;
+}
+
+static char *container_types[] = {
+        "None",
+        "Volume",
+        "Mirror",
+        "Stripe",
+        "RAID5",
+        "SSRW",
+        "SSRO",
+        "Morph",
+        "Legacy",
+        "RAID4",
+        "RAID10",             
+        "RAID00",             
+        "V-MIRRORS",          
+        "PSEUDO R4",          
+       "RAID50",
+        "Unknown"
+};
+
+
+
+/* Function: setinqstr
+ *
+ * Arguments: [1] pointer to void [1] int
+ *
+ * Purpose: Sets SCSI inquiry data strings for vendor, product
+ * and revision level. Allows strings to be set in platform dependent
+ * files instead of in OS dependent driver source.
+ */
+
+static void setinqstr(int devtype, void *data, int tindex)
+{
+       struct scsi_inq *str;
+       char *findit;
+       struct aac_driver_ident *mp;
+
+       mp = aac_get_driver_ident(devtype);
+   
+       str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
+
+       inqstrcpy (mp->vname, str->vid); 
+       inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
+
+       findit = str->pid;
+
+       for ( ; *findit != ' '; findit++); /* walk till we find a space then incr by 1 */
+               findit++;
+       
+       if (tindex < (sizeof(container_types)/sizeof(char *))){
+               inqstrcpy (container_types[tindex], findit);
+       }
+       inqstrcpy ("V1.0", str->prl);
+}
+
+void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
+                   u8 a_sense_code, u8 incorrect_length,
+                   u8 bit_pointer, u16 field_pointer,
+                   u32 residue)
+{
+       sense_buf[0] = 0xF0;    /* Sense data valid, err code 70h (current error) */
+       sense_buf[1] = 0;       /* Segment number, always zero */
+
+       if (incorrect_length) {
+               sense_buf[2] = sense_key | 0x20;        /* Set ILI bit | sense key */
+               sense_buf[3] = BYTE3(residue);
+               sense_buf[4] = BYTE2(residue);
+               sense_buf[5] = BYTE1(residue);
+               sense_buf[6] = BYTE0(residue);
+       } else
+               sense_buf[2] = sense_key;       /* Sense key */
+
+       if (sense_key == SENKEY_ILLEGAL)
+               sense_buf[7] = 10;      /* Additional sense length */
+       else
+               sense_buf[7] = 6;       /* Additional sense length */
+
+       sense_buf[12] = sense_code;     /* Additional sense code */
+       sense_buf[13] = a_sense_code;   /* Additional sense code qualifier */
+       if (sense_key == SENKEY_ILLEGAL) {
+               sense_buf[15] = 0;
+
+               if (sense_code == SENCODE_INVALID_PARAM_FIELD)
+                       sense_buf[15] = 0x80;   /* Std sense key specific field */
+               /* Illegal parameter is in the parameter block */
+
+               if (sense_code == SENCODE_INVALID_CDB_FIELD)
+                       sense_buf[15] = 0xc0;   /* Std sense key specific field */
+               /* Illegal parameter is in the CDB block */
+               sense_buf[15] |= bit_pointer;
+               sense_buf[16] = field_pointer >> 8;     /* MSB */
+               sense_buf[17] = field_pointer;          /* LSB */
+       }
+}
+
+static void aac_io_done(Scsi_Cmnd * scsicmd)
+{
+       unsigned long cpu_flags;
+       spin_lock_irqsave(&io_request_lock, cpu_flags);
+       scsicmd->scsi_done(scsicmd);
+       spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+}
+
+static void __aac_io_done(Scsi_Cmnd * scsicmd)
+{
+       scsicmd->scsi_done(scsicmd);
+}
+
+int aac_get_adapter_info(struct aac_dev* dev)
+{
+       struct fib* fibptr;
+       struct aac_adapter_info* info;
+       int rcode;
+       u32 tmp;
+
+       if (!(fibptr = fib_alloc(dev)))
+               return -ENOMEM;
+
+       fib_init(fibptr);
+       info = (struct aac_adapter_info*) fib_data(fibptr);
+
+       memset(info,0,sizeof(struct aac_adapter_info));
+
+       rcode = fib_send(RequestAdapterInfo,
+                       fibptr, 
+                       sizeof(struct aac_adapter_info),
+                       FsaNormal, 
+                       1, 1, 
+                       NULL, 
+                       NULL);
+
+       memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
+
+       tmp = dev->adapter_info.kernelrev;
+       printk(KERN_INFO "%s%d: kernel %d.%d.%d build %d\n", 
+                       dev->name, dev->id,
+                       tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
+                       dev->adapter_info.kernelbuild);
+       tmp = dev->adapter_info.monitorrev;
+       printk(KERN_INFO "%s%d: monitor %d.%d.%d build %d\n", 
+                       dev->name, dev->id,
+                       tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
+                       dev->adapter_info.monitorbuild);
+       tmp = dev->adapter_info.biosrev;
+       printk(KERN_INFO "%s%d: bios %d.%d.%d build %d\n", 
+                       dev->name, dev->id,
+                       tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
+                       dev->adapter_info.biosbuild);
+       printk(KERN_INFO "%s%d: serial %x%x\n",
+                       dev->name, dev->id,
+                       dev->adapter_info.serial[0],
+                       dev->adapter_info.serial[1]);
+       dev->pae_support = 0;
+       dev->nondasd_support = 0;
+       if( BITS_PER_LONG >= 64 && 
+         (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
+               printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", 
+                      dev->name, dev->id);
+               dev->pae_support = 1;
+       }
+       /* TODO - dmb temporary until fw can set this bit  */
+       dev->pae_support = (BITS_PER_LONG >= 64);
+       if(dev->pae_support != 0) {
+               printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", 
+                      dev->name, dev->id);
+       }
+
+       if(dev->adapter_info.options & AAC_OPT_NONDASD){
+               dev->nondasd_support = 1;
+       }
+       return rcode;
+}
+
+
+static void read_callback(void *context, struct fib * fibptr)
+{
+       struct aac_dev *dev;
+       struct aac_read_reply *readreply;
+       Scsi_Cmnd *scsicmd;
+       u32 lba;
+       u32 cid;
+
+       scsicmd = (Scsi_Cmnd *) context;
+
+       dev = (struct aac_dev *)scsicmd->host->hostdata;
+       cid =TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
+
+       lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+       dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
+
+       if (fibptr == NULL)
+               BUG();
+               
+       if(scsicmd->use_sg)
+               pci_unmap_sg(dev->pdev, 
+                       (struct scatterlist *)scsicmd->buffer,
+                       scsicmd->use_sg,
+                       scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+       else if(scsicmd->request_bufflen)
+               pci_unmap_single(dev->pdev, (dma_addr_t)(unsigned long)scsicmd->SCp.ptr,
+                                scsicmd->request_bufflen,
+                                scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+       readreply = (struct aac_read_reply *)fib_data(fibptr);
+       if (le32_to_cpu(readreply->status) == ST_OK)
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+       else {
+               printk(KERN_WARNING "read_callback: read failed, status = %d\n", readreply->status);
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+               set_sense((u8 *) &sense_data[cid],
+                                   SENKEY_HW_ERR,
+                                   SENCODE_INTERNAL_TARGET_FAILURE,
+                                   ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+                                   0, 0);
+       }
+       fib_complete(fibptr);
+       fib_free(fibptr);
+
+       aac_io_done(scsicmd);
+}
+
+/**
+ *     write_callback  -       completion handler for a container write fib
+ *     @context:       the Scsi_Cmnd this write was issued for
+ *     @fibptr:        the fib that has completed
+ *
+ *     Runs when the adapter finishes a write: unmaps the command's DMA
+ *     buffers, maps the adapter status onto a SCSI result (raising
+ *     CHECK_CONDITION with hardware-error sense data on failure),
+ *     releases the fib and completes the SCSI command.
+ */
+static void write_callback(void *context, struct fib * fibptr)
+{
+       struct aac_dev *dev;
+       struct aac_write_reply *writereply;
+       Scsi_Cmnd *scsicmd;
+       u32 lba;
+       u32 cid;
+
+       scsicmd = (Scsi_Cmnd *) context;
+       dev = (struct aac_dev *)scsicmd->host->hostdata;
+       cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
+
+       /* NOTE(review): this is the 6-byte-CDB lba decode; for a
+        * write(10) it is wrong, but it only feeds the debug trace. */
+       lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+       dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
+       if (fibptr == NULL)
+               BUG();
+
+       /* Undo the DMA mapping set up by aac_build_sg()/aac_build_sg64();
+        * the single-buffer dma handle was stashed in SCp.ptr. */
+       if(scsicmd->use_sg)
+               pci_unmap_sg(dev->pdev, 
+                       (struct scatterlist *)scsicmd->buffer,
+                       scsicmd->use_sg,
+                       scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+       else if(scsicmd->request_bufflen)
+               pci_unmap_single(dev->pdev, (dma_addr_t)(unsigned long)scsicmd->SCp.ptr,
+                                scsicmd->request_bufflen,
+                                scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+
+       writereply = (struct aac_write_reply *) fib_data(fibptr);
+       if (le32_to_cpu(writereply->status) == ST_OK)
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+       else {
+               printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
+               /* Failed container write: report hardware-error sense. */
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+               set_sense((u8 *) &sense_data[cid],
+                                   SENKEY_HW_ERR,
+                                   SENCODE_INTERNAL_TARGET_FAILURE,
+                                   ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+                                   0, 0);
+       }
+
+       fib_complete(fibptr);
+       fib_free(fibptr);
+       aac_io_done(scsicmd);
+}
+
+/**
+ *     aac_read        -       issue a container read
+ *     @scsicmd:       the READ(6)/READ(10) command to service
+ *     @cid:           container id derived from the target/lun
+ *
+ *     Decodes the CDB into an LBA and sector count, builds a BlockRead
+ *     (or HostRead64 when the adapter has PAE support) fib and sends it
+ *     asynchronously; read_callback() completes the command later.
+ *     Returns 0 once the fib is queued, -1 on failure (the command has
+ *     then already been completed with an error/QUEUE_FULL status).
+ */
+int aac_read(Scsi_Cmnd * scsicmd, int cid)
+{
+       u32 lba;
+       u32 count;
+       int status;
+
+       u16 fibsize;
+       struct aac_dev *dev;
+       struct fib * cmd_fibcontext;
+
+       dev = (struct aac_dev *)scsicmd->host->hostdata;
+       /*
+        *      Get block address and transfer length
+        */
+       if (scsicmd->cmnd[0] == SS_READ)        /* 6 byte command */
+       {
+               dprintk((KERN_DEBUG "aachba: received a read(6) command on target %d.\n", cid));
+
+               lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+               count = scsicmd->cmnd[4];
+
+               /* SCSI READ(6): a transfer length of 0 means 256 blocks. */
+               if (count == 0)
+                       count = 256;
+       } else {
+               dprintk((KERN_DEBUG "aachba: received a read(10) command on target %d.\n", cid));
+
+               lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+               count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+       }
+       dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
+       /*
+        *      Alocate and initialize a Fib
+        */
+       if (!(cmd_fibcontext = fib_alloc(dev))) {
+               scsicmd->result = DID_ERROR << 16;
+               aac_io_done(scsicmd);
+               return (-1);
+       }
+
+       fib_init(cmd_fibcontext);
+
+       /* PAE-capable adapters take a 64-bit HostRead64 request. */
+       if(dev->pae_support == 1){
+               struct aac_read64 *readcmd;
+               readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
+               readcmd->command = cpu_to_le32(VM_CtHostRead64);
+               readcmd->cid = cpu_to_le16(cid);
+               readcmd->sector_count = cpu_to_le16(count);
+               readcmd->block = cpu_to_le32(lba);
+               readcmd->pad   = cpu_to_le16(0);
+               readcmd->flags = cpu_to_le16(0); 
+               
+               aac_build_sg64(scsicmd, &readcmd->sg);
+               if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
+                       BUG();
+               /* The fib holds one embedded sgentry64; add the rest. */
+               fibsize = sizeof(struct aac_read64) + 
+                   ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
+               /*
+                *      Now send the Fib to the adapter
+                */
+               status = fib_send(ContainerCommand64, 
+                         cmd_fibcontext, 
+                         fibsize, 
+                         FsaNormal, 
+                         0, 1, 
+                         (fib_callback) read_callback, 
+                         (void *) scsicmd);
+       } else {
+               struct aac_read *readcmd;
+               readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
+               readcmd->command = cpu_to_le32(VM_CtBlockRead);
+               readcmd->cid = cpu_to_le32(cid);
+               readcmd->block = cpu_to_le32(lba);
+               readcmd->count = cpu_to_le32(count * 512);
+
+               /* The 32-bit BlockRead interface is limited to 64KB. */
+               if (count * 512 > (64 * 1024))
+                       BUG();
+
+               aac_build_sg(scsicmd, &readcmd->sg);
+               if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
+                       BUG();
+               /* The fib holds one embedded sgentry; add the rest. */
+               fibsize = sizeof(struct aac_read) + 
+                   ((readcmd->sg.count - 1) * sizeof (struct sgentry));
+               /*
+                *      Now send the Fib to the adapter
+                */
+               status = fib_send(ContainerCommand, 
+                         cmd_fibcontext, 
+                         fibsize, 
+                         FsaNormal, 
+                         0, 1, 
+                         (fib_callback) read_callback, 
+                         (void *) scsicmd);
+       }
+       
+       
+       /*
+        *      Check that the command queued to the controller
+        */
+       if (status == -EINPROGRESS) 
+               return 0;
+               
+       printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", 
+              status);
+       /*
+        *      For some reason, the Fib didn't queue, return QUEUE_FULL
+        */
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
+       aac_io_done(scsicmd);
+       fib_complete(cmd_fibcontext);
+       fib_free(cmd_fibcontext);
+       return -1;
+}
+
+/**
+ *     aac_write       -       issue a container write
+ *     @scsicmd:       the WRITE(6)/WRITE(10) command to service
+ *     @cid:           container id derived from the target/lun
+ *
+ *     Decodes the CDB into an LBA and sector count, builds a BlockWrite
+ *     (or HostWrite64 when the adapter has PAE support) fib and sends
+ *     it asynchronously; write_callback() completes the command later.
+ *     Returns 0 once the fib is queued, -1 on failure (the command has
+ *     then already been completed with an error/QUEUE_FULL status).
+ */
+static int aac_write(Scsi_Cmnd * scsicmd, int cid)
+{
+       u32 lba;
+       u32 count;
+       int status;
+       u16 fibsize;
+       struct aac_dev *dev;
+       struct fib * cmd_fibcontext;
+
+       dev = (struct aac_dev *)scsicmd->host->hostdata;
+       /*
+        *      Get block address and transfer length
+        */
+       if (scsicmd->cmnd[0] == SS_WRITE)       /* 6 byte command */
+       {
+               lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+               count = scsicmd->cmnd[4];
+               /* SCSI WRITE(6): a transfer length of 0 means 256 blocks. */
+               if (count == 0)
+                       count = 256;
+       } else {
+               dprintk((KERN_DEBUG "aachba: received a write(10) command on target %d.\n", cid));
+               lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+               count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+       }
+       dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", 
+                smp_processor_id(), lba, jiffies));
+       /*
+        *      Allocate and initialize a Fib then setup a BlockWrite command
+        */
+       if (!(cmd_fibcontext = fib_alloc(dev))) {
+               scsicmd->result = DID_ERROR << 16;
+               aac_io_done(scsicmd);
+               return -1;
+       }
+       fib_init(cmd_fibcontext);
+
+       /* PAE-capable adapters take a 64-bit HostWrite64 request. */
+       if(dev->pae_support == 1)
+       {
+               struct aac_write64 *writecmd;
+               writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
+               writecmd->command = cpu_to_le32(VM_CtHostWrite64);
+               writecmd->cid = cpu_to_le16(cid);
+               writecmd->sector_count = cpu_to_le16(count); 
+               writecmd->block = cpu_to_le32(lba);
+               writecmd->pad   = cpu_to_le16(0);
+               writecmd->flags = cpu_to_le16(0);
+
+               aac_build_sg64(scsicmd, &writecmd->sg);
+               if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
+                       BUG();
+               /* The fib holds one embedded sgentry64; add the rest. */
+               fibsize = sizeof(struct aac_write64) + 
+                   ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
+               /*
+                *      Now send the Fib to the adapter
+                */
+               status = fib_send(ContainerCommand64, 
+                         cmd_fibcontext, 
+                         fibsize, 
+                         FsaNormal, 
+                         0, 1, 
+                         (fib_callback) write_callback, 
+                         (void *) scsicmd);
+       }
+       else 
+       {
+               struct aac_write *writecmd;
+               writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
+               writecmd->command = cpu_to_le32(VM_CtBlockWrite);
+               writecmd->cid = cpu_to_le32(cid);
+               writecmd->block = cpu_to_le32(lba);
+               writecmd->count = cpu_to_le32(count * 512);
+               /* NOTE(review): presumably redundant - aac_build_sg()
+                * below rewrites sg.count; TODO confirm and drop. */
+               writecmd->sg.count = cpu_to_le32(1);
+               /* ->stable is not used - it did mean which type of write */
+
+               /* The 32-bit BlockWrite interface is limited to 64KB. */
+               if (count * 512 > (64 * 1024))
+                       BUG();
+               aac_build_sg(scsicmd, &writecmd->sg);
+               if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
+                       BUG();
+               /* The fib holds one embedded sgentry; add the rest. */
+               fibsize = sizeof(struct aac_write) + 
+                   ((writecmd->sg.count - 1) * sizeof (struct sgentry));
+               /*
+                *      Now send the Fib to the adapter
+                */
+               status = fib_send(ContainerCommand, 
+                         cmd_fibcontext, 
+                         fibsize, 
+                         FsaNormal, 
+                         0, 1, 
+                         (fib_callback) write_callback, 
+                         (void *) scsicmd);
+       }
+
+       /*
+        *      Check that the command queued to the controller
+        */
+       if (status == -EINPROGRESS)
+               return 0;
+
+       printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
+       /*
+        *      For some reason, the Fib didn't queue, return QUEUE_FULL
+        */
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
+       aac_io_done(scsicmd);
+
+       fib_complete(cmd_fibcontext);
+       fib_free(cmd_fibcontext);
+       return -1;
+}
+
+
+/**
+ *     aac_scsi_cmd()          -       Process SCSI command
+ *     @scsicmd:               SCSI command block
+ *
+ *     Emulate a SCSI command and queue the required request for the
+ *     aacraid firmware.  Commands the driver can answer itself
+ *     (INQUIRY, READ CAPACITY, MODE SENSE, ...) are completed inline;
+ *     reads and writes are translated into container fibs.  Returns 0
+ *     when the command was handled or queued, -1 on failure.
+ */
+int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
+{
+    u32 cid = 0;
+    struct fsa_scsi_hba *fsa_dev_ptr;
+    int cardtype;
+    int ret;
+    struct aac_dev *dev = (struct aac_dev *)scsicmd->host->hostdata;
+    
+    cardtype = dev->cardtype;
+
+    fsa_dev_ptr = fsa_dev[scsicmd->host->unique_id];
+    
+    /*
+     * If the bus, target or lun is out of range, return fail
+     * Test does not apply to ID 16, the pseudo id for the controller
+     * itself.
+     */
+    if (scsicmd->target != scsicmd->host->this_id) {
+       if ((scsicmd->channel == 0) ){
+           if( (scsicmd->target >= AAC_MAX_TARGET) || (scsicmd->lun != 0)){ 
+               scsicmd->result = DID_NO_CONNECT << 16;
+               __aac_io_done(scsicmd);
+               return 0;
+           }
+           cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
+           
+           /*
+            *  If the target container doesn't exist, it may have
+            *  been newly created
+            */
+           if (fsa_dev_ptr->valid[cid] == 0) {
+               switch (scsicmd->cmnd[0]) {
+               case SS_INQUIR:
+               case SS_RDCAP:
+               case SS_TEST:
+                   /* probe_container() may sleep; drop the lock. */
+                   spin_unlock_irq(&io_request_lock);
+                   probe_container(dev, cid);
+                   spin_lock_irq(&io_request_lock);
+                   if (fsa_dev_ptr->valid[cid] == 0) {
+                       scsicmd->result = DID_NO_CONNECT << 16;
+                       __aac_io_done(scsicmd);
+                       return 0;
+                   }
+                   /* fall through */
+               default:
+                   break;
+               }
+           }
+           /*
+            *  If the target container still doesn't exist, 
+            *  return failure
+            */
+           if (fsa_dev_ptr->valid[cid] == 0) {
+               scsicmd->result = DID_BAD_TARGET << 16;
+               __aac_io_done(scsicmd);
+               return -1;
+                       }
+       } else {  /* check for physical non-dasd devices */
+           if(dev->nondasd_support == 1){
+               return aac_send_srb_fib(scsicmd);
+           } else {
+               scsicmd->result = DID_NO_CONNECT << 16;
+               __aac_io_done(scsicmd);
+               return 0;
+           }
+       }
+    }
+    /*
+     * else Command for the controller itself
+     */
+    else if ((scsicmd->cmnd[0] != SS_INQUIR) &&        
+            (scsicmd->cmnd[0] != SS_TEST)) 
+    {
+       /* only INQUIRY & TUR cmnd supported for controller */
+       dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for "
+                "controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 
+           CHECK_CONDITION;
+       set_sense((u8 *) &sense_data[cid],
+                 SENKEY_ILLEGAL,
+                 SENCODE_INVALID_COMMAND,
+                 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+       __aac_io_done(scsicmd);
+       return -1;
+    }
+    
+    
+    /* Handle commands here that don't require going out to the adapter */
+    switch (scsicmd->cmnd[0]) {
+    case SS_INQUIR:
+    {
+       struct inquiry_data *inq_data_ptr;
+       
+       dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->target));
+       /* NOTE(review): writes straight into request_buffer - assumes
+        * INQUIRY never arrives with a scatter/gather list; confirm. */
+       inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
+       memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
+       
+       inq_data_ptr->inqd_ver = 2;     /* claim compliance to SCSI-2 */
+       inq_data_ptr->inqd_dtq = 0x80;  /* set RMB bit to one indicating that the medium is removable */
+       inq_data_ptr->inqd_rdf = 2;     /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
+       inq_data_ptr->inqd_len = 31;
+       /*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
+       inq_data_ptr->inqd_pad2= 0x32 ;  /*WBus16|Sync|CmdQue */
+       /*
+        *      Set the Vendor, Product, and Revision Level
+        *      see: <vendor>.c i.e. aac.c
+        */
+       setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
+       if (scsicmd->target == scsicmd->host->this_id)
+           inq_data_ptr->inqd_pdt = INQD_PDT_PROC;     /* Processor device */
+       else
+           inq_data_ptr->inqd_pdt = INQD_PDT_DA;       /* Direct/random access device */
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+       __aac_io_done(scsicmd);
+       return 0;
+    }
+    case SS_RDCAP:
+    {
+       int capacity;
+       char *cp;
+       
+       dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
+       /* READ CAPACITY reports the LAST addressable block, hence -1. */
+       capacity = fsa_dev_ptr->size[cid] - 1;
+       cp = scsicmd->request_buffer;
+       cp[0] = (capacity >> 24) & 0xff;
+       cp[1] = (capacity >> 16) & 0xff;
+       cp[2] = (capacity >> 8) & 0xff;
+       cp[3] = (capacity >> 0) & 0xff;
+       /* Bytes 4-7: block length = 0x00000200 = 512 bytes. */
+       cp[4] = 0;
+       cp[5] = 0;
+       cp[6] = 2;
+       cp[7] = 0;
+       
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+       __aac_io_done(scsicmd);
+       
+       return 0;
+    }
+    
+    case SS_MODESEN:
+    {
+       char *mode_buf;
+       
+       dprintk((KERN_DEBUG "MODE SENSE command.\n"));
+       /* Minimal MODE SENSE reply: header only, no block descriptors. */
+       mode_buf = scsicmd->request_buffer;
+       mode_buf[0] = 0;  /* Mode data length (MSB) */
+       mode_buf[1] = 6;  /* Mode data length (LSB) */
+       mode_buf[2] = 0;  /* Medium type - default */
+       mode_buf[3] = 0;  /* Device-specific param, 
+                            bit 8: 0/1 = write enabled/protected */
+       mode_buf[4] = 0;  /* reserved */
+       mode_buf[5] = 0;  /* reserved */
+       mode_buf[6] = 0;  /* Block descriptor length (MSB) */
+       mode_buf[7] = 0;  /* Block descriptor length (LSB) */
+       
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+       __aac_io_done(scsicmd);
+       
+       return 0;
+    }
+    case SS_REQSEN:
+       dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
+       /* Hand back the stored sense data and clear it (sense is
+        * reported once, per the SCSI contract). */
+       memcpy(scsicmd->sense_buffer, &sense_data[cid], 
+              sizeof (struct sense_data));
+       memset(&sense_data[cid], 0, sizeof (struct sense_data));
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+       __aac_io_done(scsicmd);
+       return (0);
+       
+    case SS_LOCK:
+       dprintk((KERN_DEBUG "LOCK command.\n"));
+       /* PREVENT/ALLOW MEDIUM REMOVAL: byte 4 non-zero locks. */
+       if (scsicmd->cmnd[4])
+           fsa_dev_ptr->locked[cid] = 1;
+       else
+           fsa_dev_ptr->locked[cid] = 0;
+       
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+       __aac_io_done(scsicmd);
+       return 0;
+       /*
+        *      These commands are all No-Ops
+        */
+    case SS_TEST:
+    case SS_RESERV:
+    case SS_RELES:
+    case SS_REZERO:
+    case SS_REASGN:
+    case SS_SEEK:
+    case SS_ST_SP:
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+       __aac_io_done(scsicmd);
+       return (0);
+    }
+    
+    /* Data-transfer commands: translate into container fibs. */
+    switch (scsicmd->cmnd[0]) 
+    {
+    case SS_READ:
+    case SM_READ:
+       /*
+        *      Hack to keep track of ordinal number of the device that
+        *      corresponds to a container. Needed to convert
+        *      containers to /dev/sd device names
+        */
+       
+       spin_unlock_irq(&io_request_lock);
+       fsa_dev_ptr->devno[cid] = DEVICE_NR(scsicmd->request.rq_dev);
+       ret = aac_read(scsicmd, cid);
+       spin_lock_irq(&io_request_lock);
+       return ret;
+       
+    case SS_WRITE:
+    case SM_WRITE:
+       spin_unlock_irq(&io_request_lock);
+       ret = aac_write(scsicmd, cid);
+       spin_lock_irq(&io_request_lock);
+       return ret;
+    default:
+       /*
+        *      Unhandled commands
+        */
+       printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", 
+              scsicmd->cmnd[0]);
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 
+           CHECK_CONDITION;
+       set_sense((u8 *) &sense_data[cid],
+                 SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
+                 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+       __aac_io_done(scsicmd);
+       return -1;
+    }
+}
+
+/**
+ *     query_disk      -       FSACTL_QUERY_DISK ioctl handler
+ *     @dev:   adapter to query
+ *     @arg:   userspace pointer to a struct aac_query_disk
+ *
+ *     Reports the valid/locked/deleted state and the sd device name of
+ *     a container identified either by container number or by
+ *     bus/target/lun.  Returns 0 on success, -EFAULT on a bad user
+ *     pointer, -EINVAL for an out-of-range or ambiguous identification.
+ */
+static int query_disk(struct aac_dev *dev, void *arg)
+{
+    struct aac_query_disk qd;
+    struct fsa_scsi_hba *fsa_dev_ptr;
+    
+    fsa_dev_ptr = &(dev->fsa_dev);
+    if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
+       return -EFAULT;
+    if (qd.cnum == -1)
+       qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
+    else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1)) 
+    {
+       qd.instance = dev->scsi_host_ptr->host_no;
+       qd.bus = 0;
+       qd.target = CONTAINER_TO_TARGET(qd.cnum);
+       qd.lun = CONTAINER_TO_LUN(qd.cnum);
+    }
+    else return -EINVAL;
+    
+    /*
+     * Bounds-check the container number on BOTH identification paths
+     * before using it as an array index.  Note ">=": the per-container
+     * arrays hold MAXIMUM_NUM_CONTAINERS entries, so an index equal to
+     * MAXIMUM_NUM_CONTAINERS is already one past the end (the previous
+     * "> MAXIMUM_NUM_CONTAINERS" test was off by one, and the
+     * bus/target/lun path was never checked at all).
+     */
+    if (qd.cnum < 0 || qd.cnum >= MAXIMUM_NUM_CONTAINERS)
+       return -EINVAL;
+    
+    qd.valid = fsa_dev_ptr->valid[qd.cnum];
+    qd.locked = fsa_dev_ptr->locked[qd.cnum];
+    qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
+    
+    if (fsa_dev_ptr->devno[qd.cnum] == -1)
+       qd.unmapped = 1;
+    else
+       qd.unmapped = 0;
+    
+    get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
+    
+    if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
+       return -EFAULT;
+    return 0;
+}
+
+/*
+ *     Map a 0-based sd disk ordinal to its device name ("sda" .. "sdz",
+ *     then "sdaa" onwards).  A negative ordinal means the container has
+ *     no mapped device and yields the empty string.
+ */
+static void get_sd_devname(int disknum, char *buffer)
+{
+    if (disknum < 0) {
+       sprintf(buffer, "%s", "");
+       return;
+    }
+    
+    if (disknum < 26) {
+       /* Single-letter suffix: sda .. sdz */
+       sprintf(buffer, "sd%c", 'a' + disknum);
+    } else {
+       /* Two-letter suffix for larger disk counts: sdaa, sdab, ... */
+       sprintf(buffer, "sd%c%c", 'a' + (disknum / 26) - 1,
+               'a' + (disknum % 26));
+    }
+}
+
+/**
+ *     force_delete_disk       -       FSACTL_FORCE_DELETE_DISK handler
+ *     @dev:   adapter owning the container
+ *     @arg:   userspace pointer to a struct aac_delete_disk
+ *
+ *     Unconditionally marks the container deleted and invalid (the
+ *     lock state is ignored, unlike delete_disk()).  Returns 0 on
+ *     success, -EFAULT on a bad user pointer, -EINVAL for an
+ *     out-of-range container number.
+ */
+static int force_delete_disk(struct aac_dev *dev, void *arg)
+{
+    struct aac_delete_disk dd;
+    struct fsa_scsi_hba *fsa_dev_ptr;
+    
+    fsa_dev_ptr = &(dev->fsa_dev);
+    
+    if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+       return -EFAULT;
+    
+    /*
+     * ">=", not ">": the per-container arrays hold
+     * MAXIMUM_NUM_CONTAINERS entries, so cnum == MAXIMUM_NUM_CONTAINERS
+     * would write one past the end.
+     */
+    if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
+       return -EINVAL;
+    /*
+     * Mark this container as being deleted.
+     */
+    fsa_dev_ptr->deleted[dd.cnum] = 1;
+    /*
+     * Mark the container as no longer valid
+     */
+    fsa_dev_ptr->valid[dd.cnum] = 0;
+    return 0;
+}
+
+/**
+ *     delete_disk     -       FSACTL_DELETE_DISK ioctl handler
+ *     @dev:   adapter owning the container
+ *     @arg:   userspace pointer to a struct aac_delete_disk
+ *
+ *     Marks an unlocked container invalid and unmaps its device
+ *     number.  Returns 0 on success, -EFAULT on a bad user pointer,
+ *     -EINVAL for an out-of-range container number, -EBUSY when the
+ *     container is locked.
+ */
+static int delete_disk(struct aac_dev *dev, void *arg)
+{
+    struct aac_delete_disk dd;
+    struct fsa_scsi_hba *fsa_dev_ptr;
+
+    fsa_dev_ptr = &(dev->fsa_dev);
+
+    if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+       return -EFAULT;
+
+    /*
+     * ">=", not ">": the per-container arrays hold
+     * MAXIMUM_NUM_CONTAINERS entries, so cnum == MAXIMUM_NUM_CONTAINERS
+     * would write one past the end.
+     */
+    if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
+       return -EINVAL;
+    /*
+     * If the container is locked, it can not be deleted by the API.
+     */
+    if (fsa_dev_ptr->locked[dd.cnum])
+       return -EBUSY;
+    else {
+       /*
+        *      Mark the container as no longer being valid.
+        */
+       fsa_dev_ptr->valid[dd.cnum] = 0;
+       fsa_dev_ptr->devno[dd.cnum] = -1;
+       return 0;
+    }
+}
+
+/**
+ *     aac_dev_ioctl   -       dispatch a disk management ioctl
+ *     @dev:   adapter the ioctl is aimed at
+ *     @cmd:   ioctl command number
+ *     @arg:   userspace argument pointer
+ *
+ *     Routes the FSACTL disk query/delete ioctls to their handlers and
+ *     returns the handler's result, or -ENOTTY for unknown commands.
+ */
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg)
+{
+    if (cmd == FSACTL_QUERY_DISK)
+       return query_disk(dev, arg);
+    if (cmd == FSACTL_DELETE_DISK)
+       return delete_disk(dev, arg);
+    if (cmd == FSACTL_FORCE_DELETE_DISK)
+       return force_delete_disk(dev, arg);
+    /* 2131: bare ioctl number used to trigger a container rescan -
+     * presumably deserves a named FSACTL_* constant; TODO confirm
+     * against the management tool's ioctl header. */
+    if (cmd == 2131)
+       return aac_get_containers(dev);
+    return -ENOTTY;
+}
+
+/**
+ *
+ * aac_srb_callback
+ * @context: the context set in the fib - here it is scsi cmd
+ * @fibptr: pointer to the fib
+ *
+ * Handles the completion of a scsi command to a non dasd device:
+ * unmaps the DMA buffers, then translates first the fib status, then
+ * the SRB status and finally the SCSI status into scsicmd->result
+ * before releasing the fib and completing the command.
+ *
+ */
+
+static void aac_srb_callback(void *context, struct fib * fibptr)
+{
+    struct aac_dev *dev;
+    struct aac_srb_reply *srbreply;
+    Scsi_Cmnd *scsicmd;
+
+    scsicmd = (Scsi_Cmnd *) context;
+    dev = (struct aac_dev *)scsicmd->host->hostdata;
+
+    if (fibptr == NULL)
+       BUG();
+
+    srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
+    scsicmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
+    // calculate resid for sg 
+    scsicmd->resid = scsicmd->request_bufflen - srbreply->data_xfer_length;
+
+    /* Undo the DMA mapping set up by aac_build_sg()/aac_build_sg64(). */
+    if(scsicmd->use_sg)
+       pci_unmap_sg(dev->pdev, 
+                    (struct scatterlist *)scsicmd->buffer,
+                    scsicmd->use_sg,
+                    scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+    else if(scsicmd->request_bufflen)
+       pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, 
+                        scsicmd->request_bufflen,
+                        scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+
+    /*
+     * First check the fib status
+     */
+
+    if (le32_to_cpu(srbreply->status) != ST_OK){
+       int len;
+       printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", 
+              le32_to_cpu(srbreply->status));
+       /* NOTE(review): sense_data_size is used without le32_to_cpu()
+        * here and below - looks like a big-endian hazard; confirm. */
+       len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
+           sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
+       scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | 
+           CHECK_CONDITION;
+       memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
+    }
+
+    /*
+     * Next check the srb status
+     */
+    switch(le32_to_cpu(srbreply->srb_status)){
+    case SRB_STATUS_ERROR_RECOVERY:
+    case SRB_STATUS_PENDING:
+    case SRB_STATUS_SUCCESS:
+       if(scsicmd->cmnd[0] == INQUIRY ){
+           u8 b;
+           /* We can't expose disk devices because we can't tell whether they
+            * are the raw container drives or stand alone drives
+            */
+           b = *(u8*)scsicmd->buffer;
+           if( (b & 0x0f) == TYPE_DISK ){
+               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+           }
+       } else {
+           scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+       }
+       break;
+    case SRB_STATUS_DATA_OVERRUN:
+       /* Over/underrun is only an error for data-transfer commands. */
+       switch(scsicmd->cmnd[0]){
+       case  READ_6:
+       case  WRITE_6:
+       case  READ_10:
+       case  WRITE_10:
+       case  READ_12:
+       case  WRITE_12:
+           if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
+               printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
+           } else {
+               printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
+           }
+           scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+           break;
+       default:
+           scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+           break;
+       }
+       break;
+    case SRB_STATUS_ABORTED:
+       scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+       break;
+    case SRB_STATUS_ABORT_FAILED:
+       // Not sure about this one - but assuming the hba was trying 
+       // to abort for some reason
+       scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+       break;
+    case SRB_STATUS_PARITY_ERROR:
+       scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
+       break;
+    case SRB_STATUS_NO_DEVICE:
+    case SRB_STATUS_INVALID_PATH_ID:
+    case SRB_STATUS_INVALID_TARGET_ID:
+    case SRB_STATUS_INVALID_LUN:
+    case SRB_STATUS_SELECTION_TIMEOUT:
+       scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+       break;
+
+    case SRB_STATUS_COMMAND_TIMEOUT:
+    case SRB_STATUS_TIMEOUT:
+       scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
+       break;
+
+    case SRB_STATUS_BUSY:
+       scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+       break;
+
+    case SRB_STATUS_BUS_RESET:
+       scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
+       break;
+
+    case SRB_STATUS_MESSAGE_REJECTED:
+       scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+       break;
+    case SRB_STATUS_REQUEST_FLUSHED:
+    case SRB_STATUS_ERROR:
+    case SRB_STATUS_INVALID_REQUEST:
+    case SRB_STATUS_REQUEST_SENSE_FAILED:
+    case SRB_STATUS_NO_HBA:
+    case SRB_STATUS_UNEXPECTED_BUS_FREE:
+    case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+    case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+    case SRB_STATUS_DELAYED_RETRY:
+    case SRB_STATUS_BAD_FUNCTION:
+    case SRB_STATUS_NOT_STARTED:
+    case SRB_STATUS_NOT_IN_USE:
+    case SRB_STATUS_FORCE_ABORT:
+    case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+    default:
+#ifdef AAC_DETAILED_STATUS_INFO
+       printk("aacraid: SRB ERROR (%s)\n", 
+              aac_get_status_string(le32_to_cpu(srbreply->srb_status)));
+#endif
+       scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+       break;
+    }
+    if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  // Check Condition
+       int len;
+       len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
+           sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
+       printk(KERN_WARNING "aac_srb_callback: check condition, "
+              "status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
+       memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
+    }
+    /*
+     * OR in the scsi status (already shifted up a bit)
+     */
+    scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
+
+    fib_complete(fibptr);
+    fib_free(fibptr);
+    aac_io_done(scsicmd);
+}
+
+/**
+ *
+ * aac_send_srb_fib
+ * @scsicmd: the scsi command block
+ *
+ * This routine will form a FIB and fill in the aac_srb from the 
+ * scsicmd passed in.  The fib is sent asynchronously and completed by
+ * aac_srb_callback().  Returns 0 once the fib is queued, -1 on failure
+ * (the command has then already been completed with an error status).
+ */
+
+static int aac_send_srb_fib(Scsi_Cmnd* scsicmd)
+{
+    struct fib* cmd_fibcontext;
+    struct aac_dev* dev;
+    int status;
+    struct aac_srb *srbcmd;
+    u16 fibsize;
+    u32 flag;
+
+    /* The SRB interface addresses at most 16 targets and 8 luns. */
+    if( scsicmd->target > 15 || scsicmd->lun > 7) {
+       scsicmd->result = DID_NO_CONNECT << 16;
+       __aac_io_done(scsicmd);
+       return 0;
+    }
+
+    dev = (struct aac_dev *)scsicmd->host->hostdata;
+    /* Translate the midlayer data direction into SRB flags. */
+    switch(scsicmd->sc_data_direction){
+    case SCSI_DATA_WRITE:
+       flag = SRB_DataOut;
+       break;
+    case SCSI_DATA_UNKNOWN:  
+       flag = SRB_DataIn | SRB_DataOut;
+       break;
+    case SCSI_DATA_READ:
+       flag = SRB_DataIn;
+       break;
+    case SCSI_DATA_NONE: 
+    default:
+       flag = SRB_NoDataXfer;
+       break;
+    }
+
+
+    /*
+     * Allocate and initialize a Fib then setup a SRB command
+     */
+    if (!(cmd_fibcontext = fib_alloc(dev))) {
+       scsicmd->result = DID_ERROR << 16;
+       __aac_io_done(scsicmd);
+       return -1;
+    }
+    fib_init(cmd_fibcontext);
+
+    srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
+    srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+    srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scsicmd->channel));
+    srbcmd->target   = cpu_to_le32(scsicmd->target);
+    srbcmd->lun      = cpu_to_le32(scsicmd->lun);
+    srbcmd->flags    = cpu_to_le32(flag);
+    srbcmd->timeout  = cpu_to_le32(0);  // timeout not used
+    srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
+    srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
+       
+    if( dev->pae_support ==1 ) {
+       aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
+       srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
+
+       memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+       memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
+       /*
+        *      Build Scatter/Gather list
+        *      NOTE(review): "& 0xff" takes the low byte of the
+        *      little-endian count - fine on i386, suspicious on
+        *      big-endian; confirm.
+        */
+       fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) 
+                                            * sizeof (struct sgentry64));
+
+       /*
+        *      Now send the Fib to the adapter
+        */
+       status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, 
+                         FsaNormal, 0, 1, (fib_callback) aac_srb_callback, 
+                         (void *) scsicmd);
+    } else {
+       aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
+       srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
+
+       memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+       memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
+       /*
+        *      Build Scatter/Gather list
+        */
+       fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) 
+                                            * sizeof (struct sgentry));
+
+       /*
+        *      Now send the Fib to the adapter
+        */
+       status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, 
+                         FsaNormal, 0, 1, (fib_callback) aac_srb_callback, 
+                         (void *) scsicmd);
+    }
+    /*
+     * Check that the command queued to the controller
+     */
+    if (status == -EINPROGRESS){
+       return 0;
+    }
+
+    printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
+    /*
+     * For some reason, the Fib didn't queue, return QUEUE_FULL
+     */
+    scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
+    __aac_io_done(scsicmd);
+
+    fib_complete(cmd_fibcontext);
+    fib_free(cmd_fibcontext);
+
+    return -1;
+}
+
+/**
+ *     aac_build_sg    -       build a 32-bit scatter/gather map
+ *     @scsicmd:       command whose data buffer is to be mapped
+ *     @psg:           sg map inside the fib to fill in
+ *
+ *     Maps the command's buffer (scatterlist or single buffer) for PCI
+ *     DMA and records the bus addresses in @psg.  In the single-buffer
+ *     case the dma handle is stashed in scsicmd->SCp.ptr so the
+ *     completion callback can unmap it.  Returns the number of bytes
+ *     mapped.
+ */
+static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* psg)
+{
+    struct aac_dev *dev;
+    unsigned long byte_count = 0;
+
+    dev = (struct aac_dev *)scsicmd->host->hostdata;
+    // Get rid of old data.  Use 0, not NULL: addr holds a 32-bit bus
+    // address, not a pointer.
+    psg->count = cpu_to_le32(0);
+    psg->sg[0].addr = cpu_to_le32(0);
+    psg->sg[0].count = cpu_to_le32(0);  
+    if (scsicmd->use_sg) {
+       struct scatterlist *sg;
+       int i;
+       int sg_count;
+       sg = (struct scatterlist *) scsicmd->request_buffer;
+
+       sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
+                             scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+       psg->count = cpu_to_le32(sg_count);
+
+       byte_count = 0;
+
+       for (i = 0; i < sg_count; i++) {
+           psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
+           psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+           byte_count += sg_dma_len(sg);
+           sg++;
+       }
+       /* hba wants the size to be exact: trim the last entry.  Round
+        * the stored count through le32_to_cpu()/cpu_to_le32() - it is
+        * kept little-endian, so subtracting host-order bytes from it
+        * directly would corrupt it on big-endian machines. */
+       if(byte_count > scsicmd->request_bufflen){
+           psg->sg[i-1].count = cpu_to_le32(le32_to_cpu(psg->sg[i-1].count) -
+                                            (byte_count - scsicmd->request_bufflen));
+           byte_count = scsicmd->request_bufflen;
+       }
+       /* Check for command underflow */
+       if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+           printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+                  byte_count, scsicmd->underflow);
+       }
+    }
+    else if(scsicmd->request_bufflen) {
+       dma_addr_t addr; 
+       addr = pci_map_single(dev->pdev,
+                             scsicmd->request_buffer,
+                             scsicmd->request_bufflen,
+                             scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+       psg->count = cpu_to_le32(1);
+       psg->sg[0].addr = cpu_to_le32(addr);
+       psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
+       /* Remember the dma handle for pci_unmap_single() on completion. */
+       scsicmd->SCp.ptr = (void *)addr;
+       byte_count = scsicmd->request_bufflen;
+    }
+    return byte_count;
+}
+
+
+static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg) /* build 64-bit s/g list for scsicmd; returns total mapped byte count */
+{
+    struct aac_dev *dev;
+    unsigned long byte_count = 0;
+    u64 le_addr;
+
+    dev = (struct aac_dev *)scsicmd->host->hostdata;
+    // Clear any stale entries so a command with no data maps to an empty list
+    psg->count = cpu_to_le32(0);
+    psg->sg[0].addr[0] = cpu_to_le32(NULL); /* NOTE(review): NULL used as the integer 0 here */
+    psg->sg[0].addr[1] = cpu_to_le32(NULL);
+    psg->sg[0].count = cpu_to_le32(0);  
+    if (scsicmd->use_sg) {
+       struct scatterlist *sg;
+       int i;
+       int sg_count;
+       sg = (struct scatterlist *) scsicmd->request_buffer; /* use_sg != 0: buffer is a scatterlist */
+
+       sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
+                             scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+       psg->count = cpu_to_le32(sg_count); /* pci_map_sg may coalesce entries; use the mapped count */
+
+       byte_count = 0;
+
+       for (i = 0; i < sg_count; i++) {
+           le_addr = cpu_to_le64(sg_dma_address(sg));
+           psg->sg[i].addr[1] = (u32)(le_addr>>32); /* NOTE(review): shifting the byte-swapped value is only correct on little-endian hosts - confirm */
+           psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
+           psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+           byte_count += sg_dma_len(sg);
+           sg++;
+       }
+       /* hba wants the size to be exact: trim the last entry back to the request length */
+       if(byte_count > scsicmd->request_bufflen){
+           psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen); /* NOTE(review): arithmetic on a cpu_to_le32 value - little-endian only */
+           byte_count = scsicmd->request_bufflen;
+       }
+       /* Warn (but proceed) if mapped length is below the command's stated underflow */
+       if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+           printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+                  byte_count, scsicmd->underflow);
+       }
+    }
+    else if(scsicmd->request_bufflen) {
+       dma_addr_t addr; 
+       addr = pci_map_single(dev->pdev,
+                             scsicmd->request_buffer,
+                             scsicmd->request_bufflen,
+                             scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+       psg->count = cpu_to_le32(1); /* single flat buffer: exactly one s/g entry */
+       le_addr = cpu_to_le64(addr);
+       psg->sg[0].addr[1] = (u32)(le_addr>>32);
+       psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
+       psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
+       scsicmd->SCp.ptr = (void *)addr; /* stash the dma handle - presumably unmapped on completion; confirm */
+       byte_count = scsicmd->request_bufflen;
+    }
+    return byte_count;
+}
+
+#ifdef AAC_DETAILED_STATUS_INFO
+
+struct aac_srb_status_info {   /* one entry mapping an SRB status code to a printable name */
+       u32     status;         /* SRB_STATUS_* code */
+       char    *str;           /* human-readable description */
+};
+
+
+static struct aac_srb_status_info srb_status_info[] = { /* lookup table for aac_get_status_string(); final 0xff entry is the catch-all */
+       { SRB_STATUS_PENDING,           "Pending Status"},
+       { SRB_STATUS_SUCCESS,           "Success"},
+       { SRB_STATUS_ABORTED,           "Aborted Command"},
+       { SRB_STATUS_ABORT_FAILED,      "Abort Failed"},
+       { SRB_STATUS_ERROR,             "Error Event"}, 
+       { SRB_STATUS_BUSY,              "Device Busy"},
+       { SRB_STATUS_INVALID_REQUEST,   "Invalid Request"},
+       { SRB_STATUS_INVALID_PATH_ID,   "Invalid Path ID"},
+       { SRB_STATUS_NO_DEVICE,         "No Device"},
+       { SRB_STATUS_TIMEOUT,           "Timeout"},
+       { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
+       { SRB_STATUS_COMMAND_TIMEOUT,   "Command Timeout"},
+       { SRB_STATUS_MESSAGE_REJECTED,  "Message Rejected"},
+       { SRB_STATUS_BUS_RESET,         "Bus Reset"},
+       { SRB_STATUS_PARITY_ERROR,      "Parity Error"},
+       { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
+       { SRB_STATUS_NO_HBA,            "No HBA"},
+       { SRB_STATUS_DATA_OVERRUN,      "Data Overrun/Data Underrun"},
+       { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
+       { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
+       { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
+       { SRB_STATUS_REQUEST_FLUSHED,   "Request Flushed"},
+       { SRB_STATUS_DELAYED_RETRY,     "Delayed Retry"},
+       { SRB_STATUS_INVALID_LUN,       "Invalid LUN"}, 
+       { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
+       { SRB_STATUS_BAD_FUNCTION,      "Bad Function"},
+       { SRB_STATUS_ERROR_RECOVERY,    "Error Recovery"},
+       { SRB_STATUS_NOT_STARTED,       "Not Started"},
+       { SRB_STATUS_NOT_IN_USE,        "Not In Use"},
+       { SRB_STATUS_FORCE_ABORT,       "Force Abort"},
+       { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
+       { 0xff,                         "Unknown Error"}
+};
+
+char *aac_get_status_string(u32 status) /* linear-scan srb_status_info for 'status'; falls through to "Bad Status Code" */
+{
+       int i;
+
+       for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){
+               if(srb_status_info[i].status == status){
+                       return srb_status_info[i].str;
+               }
+       }
+
+       return "Bad Status Code"; /* only reachable if 'status' matches nothing, not even the 0xff catch-all */
+}
+
+#endif
diff --git a/xen-2.4.16/drivers/scsi/aacraid/aacraid.h b/xen-2.4.16/drivers/scsi/aacraid/aacraid.h
new file mode 100644 (file)
index 0000000..cbfee8a
--- /dev/null
@@ -0,0 +1,1419 @@
+
+/* #define dprintk(x) */
+// #define dprintk(x) printk x
+#define dprintk(x)
+
+
+#include <asm/byteorder.h>
+
+#define TRY_SOFTIRQ
+#ifdef TRY_SOFTIRQ
+/* XXX SMH: trying to use softirqs to trigger stuff done prev by threads */
+#include <xeno/interrupt.h>  /* for softirq stuff */
+#endif
+
+/*------------------------------------------------------------------------------
+ *              D E F I N E S
+ *----------------------------------------------------------------------------*/
+
+#define MAXIMUM_NUM_CONTAINERS 31
+#define MAXIMUM_NUM_ADAPTERS   8
+
+#define AAC_NUM_FIB    578
+#define AAC_NUM_IO_FIB 512
+
+#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1)
+//#define AAC_MAX_TARGET       (16)
+#define AAC_MAX_LUN    (8)
+
+/*
+ * These macros convert from physical channels to virtual channels
+ */
+#define CONTAINER_CHANNEL      (0)
+#define aac_phys_to_logical(x)  (x+1)
+#define aac_logical_to_phys(x)  (x?x-1:0)
+
+#define AAC_DETAILED_STATUS_INFO
+
+struct diskparm
+{
+       int heads;      /* disk geometry: heads per cylinder */
+       int sectors;    /* sectors per track */
+       int cylinders;  /* cylinder count */
+};
+
+
+/*
+ *     DON'T CHANGE THE ORDER, this is set by the firmware
+ */
+#define                CT_NONE                 0
+#define                CT_VOLUME               1
+#define                CT_MIRROR               2
+#define                CT_STRIPE               3
+#define                CT_RAID5                4
+#define                CT_SSRW                 5
+#define                CT_SSRO                 6
+#define                CT_MORPH                7
+#define                CT_PASSTHRU             8
+#define                CT_RAID4                9
+#define                CT_RAID10               10      /* stripe of mirror */
+#define                CT_RAID00               11      /* stripe of stripe */
+#define                CT_VOLUME_OF_MIRRORS    12      /* volume of mirror */
+#define                CT_PSEUDO_RAID          13      /* really raid4 */
+#define                CT_LAST_VOLUME_TYPE     14
+
+/*
+ *     Types of objects addressable in some fashion by the client.
+ *     This is a superset of those objects handled just by the filesystem
+ *     and includes "raw" objects that an administrator would use to
+ *     configure containers and filesystems.
+ */
+
+#define                FT_REG          1       /* regular file */
+#define                FT_DIR          2       /* directory */
+#define                FT_BLK          3       /* "block" device - reserved */
+#define                FT_CHR          4       /* "character special" device - reserved */
+#define                FT_LNK          5       /* symbolic link */
+#define                FT_SOCK         6       /* socket */
+#define                FT_FIFO         7       /* fifo */
+#define                FT_FILESYS      8       /* ADAPTEC's "FSA"(tm) filesystem */
+#define                FT_DRIVE        9       /* physical disk - addressable in scsi by bus/target/lun */
+#define                FT_SLICE        10      /* virtual disk - raw volume - slice */
+#define                FT_PARTITION    11      /* FSA partition - carved out of a slice - building block for containers */
+#define                FT_VOLUME       12      /* Container - Volume Set */
+#define                FT_STRIPE       13      /* Container - Stripe Set */
+#define                FT_MIRROR       14      /* Container - Mirror Set */
+#define                FT_RAID5        15      /* Container - Raid 5 Set */
+#define                FT_DATABASE     16      /* Storage object with "foreign" content manager */
+
+/*
+ *     Host side memory scatter gather list
+ *     Used by the adapter for read, write, and readdirplus operations
+ *     We have separate 32- and 64-bit versions because even
+ *     on 64-bit systems not all cards support the 64-bit version
+ */
+struct sgentry {
+       u32     addr;   /* 32-bit DMA address of the segment. */
+       u32     count;  /* Segment length in bytes. */
+};
+
+struct sgentry64 {
+       u32     addr[2];        /* 64-bit DMA addr. 2 u32 pieces for data alignment; [0]=low, [1]=high (see aac_build_sg64) */
+       u32     count;  /* Segment length in bytes. */
+};
+
+/*
+ *     SGMAP
+ *
+ *     This is the SGMAP structure for all commands that use
+ *     32-bit addressing.
+ */
+
+struct sgmap {
+       u32             count;  /* number of valid entries in sg[] */
+       struct sgentry  sg[1];  /* variable length; [1] is the pre-C99 flexible-array idiom */
+};
+
+struct sgmap64 {
+       u32             count;          /* number of valid entries in sg[] */
+       struct sgentry64 sg[1];         /* variable length; [1] is the pre-C99 flexible-array idiom */
+};
+
+struct creation_info
+{
+       u8              buildnum;               /* e.g., 588 */
+       u8              usec;                   /* e.g., 588 */
+       u8              via;                    /* e.g., 1 = FSU,
+                                                *       2 = API
+                                                */
+       u8              year;                   /* e.g., 1997 = 97 */
+       u32             date;                   /*
+                                                * unsigned     Month           :4;     // 1 - 12
+                                                * unsigned     Day             :6;     // 1 - 32
+                                                * unsigned     Hour            :6;     // 0 - 23
+                                                * unsigned     Minute          :6;     // 0 - 60
+                                                * unsigned     Second          :6;     // 0 - 60
+                                                */
+       u32             serial[2];                      /* e.g., 0x1DEADB0BFAFAF001 */
+};
+
+
+/*
+ *     Define all the constants needed for the communication interface
+ */
+
+/*
+ *     Define how many queue entries each queue will have and the total
+ *     number of entries for the entire communication interface. Also define
+ *     how many queues we support.
+ *
+ *     This has to match the controller
+ */
+
+#define NUMBER_OF_COMM_QUEUES  8   // 4 command; 4 response
+#define HOST_HIGH_CMD_ENTRIES  4
+#define HOST_NORM_CMD_ENTRIES  8
+#define ADAP_HIGH_CMD_ENTRIES  4
+#define ADAP_NORM_CMD_ENTRIES  512
+#define HOST_HIGH_RESP_ENTRIES 4
+#define HOST_NORM_RESP_ENTRIES 512
+#define ADAP_HIGH_RESP_ENTRIES 4
+#define ADAP_NORM_RESP_ENTRIES 8
+
+#define TOTAL_QUEUE_ENTRIES  \
+    (HOST_NORM_CMD_ENTRIES + HOST_HIGH_CMD_ENTRIES + ADAP_NORM_CMD_ENTRIES + ADAP_HIGH_CMD_ENTRIES + \
+           HOST_NORM_RESP_ENTRIES + HOST_HIGH_RESP_ENTRIES + ADAP_NORM_RESP_ENTRIES + ADAP_HIGH_RESP_ENTRIES)
+
+
+/*
+ *     Set the queues on a 16 byte alignment
+ */
+#define QUEUE_ALIGNMENT                16
+
+/*
+ *     The queue headers define the Communication Region queues. These
+ *     are physically contiguous and accessible by both the adapter and the
+ *     host. Even though all queue headers are in the same contiguous block
+ *     they will be represented as individual units in the data structures.
+ */
+
+struct aac_entry {
+       u32 size;          /* Size in bytes of Fib which this QE points to */
+       u32 addr; /* Receiver address of the FIB */
+};
+
+/*
+ *     The adapter assumes the ProducerIndex and ConsumerIndex are grouped
+ *     adjacently and in that order.
+ */
+struct aac_qhdr {
+       u64 header_addr;                /* Address to hand the adapter to access to this queue head */
+       u32 *producer;                  /* The producer index for this queue (host address) */
+       u32 *consumer;                  /* The consumer index for this queue (host address) */
+};
+
+/*
+ *     Define all the events which the adapter would like to notify
+ *     the host of.
+ */
+#define                HostNormCmdQue          1       /* Change in host normal priority command queue */
+#define                HostHighCmdQue          2       /* Change in host high priority command queue */
+#define                HostNormRespQue         3       /* Change in host normal priority response queue */
+#define                HostHighRespQue         4       /* Change in host high priority response queue */
+#define                AdapNormRespNotFull     5
+#define                AdapHighRespNotFull     6
+#define                AdapNormCmdNotFull      7
+#define                AdapHighCmdNotFull      8
+#define                SynchCommandComplete    9
+#define                AdapInternalError       0xfe    /* The adapter detected an internal error shutting down */
+
+/*
+ *     Define all the events the host wishes to notify the
+ *     adapter of. The first four values must match the Qid of the
+ *     corresponding queue.
+ */
+
+#define                AdapNormCmdQue          2
+#define                AdapHighCmdQue          3
+#define                AdapNormRespQue         6
+#define                AdapHighRespQue         7
+#define                HostShutdown            8
+#define                HostPowerFail           9
+#define                FatalCommError          10
+#define                HostNormRespNotFull     11
+#define                HostHighRespNotFull     12
+#define                HostNormCmdNotFull      13
+#define                HostHighCmdNotFull      14
+#define                FastIo                  15
+#define                AdapPrintfDone          16
+
+/*
+ *     Define all the queues that the adapter and host use to communicate
+ *     Number them to match the physical queue layout.
+ */
+
+enum aac_queue_types {
+        HostNormCmdQueue = 0,  /* Adapter to host normal priority command traffic */
+        HostHighCmdQueue,      /* Adapter to host high priority command traffic */
+        AdapNormCmdQueue,      /* Host to adapter normal priority command traffic */
+        AdapHighCmdQueue,      /* Host to adapter high priority command traffic */
+        HostNormRespQueue,     /* Adapter to host normal priority response traffic */
+        HostHighRespQueue,     /* Adapter to host high priority response traffic */
+        AdapNormRespQueue,     /* Host to adapter normal priority response traffic */
+        AdapHighRespQueue      /* Host to adapter high priority response traffic */
+};
+
+/*
+ *     Assign type values to the FSA communication data structures
+ */
+
+#define                FIB_MAGIC       0x0001
+
+/*
+ *     Define the priority levels the FSA communication routines support.
+ */
+
+#define                FsaNormal       1
+#define                FsaHigh         2
+
+/*
+ * Define the FIB. The FIB is where all the requested data and
+ * command information are put to the application on the FSA adapter.
+ */
+
+struct aac_fibhdr {
+    u32 XferState;             // Current transfer state for this CCB
+    u16 Command;               // Routing information for the destination
+    u8 StructType;             // Type FIB
+    u8 Flags;                  // Flags for FIB
+    u16 Size;                  // Size of this FIB in bytes
+    u16 SenderSize;            // Size of the FIB in the sender (for 
+                                // response sizing)
+    u32 SenderFibAddress;      // Host defined data in the FIB
+    u32 ReceiverFibAddress;    // Logical address of this FIB for the adapter
+    u32 SenderData;            // Place holder for the sender to store data
+    union {
+       struct {
+           u32 _ReceiverTimeStart;  // Timestamp for receipt of fib
+           u32 _ReceiverTimeDone;   // Timestamp for completion of fib
+       } _s;
+       struct list_head _FibLinks;  // Used to link Adapter Initiated 
+                                    // Fibs on the host
+    } _u;
+};
+
+#define FibLinks                       _u._FibLinks
+
+#define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr))
+
+
+struct hw_fib {
+       struct aac_fibhdr header;
+       u8 data[FIB_DATA_SIZE_IN_BYTES];                // Command specific data
+};
+
+/*
+ *     FIB commands
+ */
+
+#define        TestCommandResponse             1
+#define                TestAdapterCommand              2
+/*
+ *     Lowlevel and comm commands
+ */
+#define                LastTestCommand                 100
+#define                ReinitHostNormCommandQueue      101
+#define                ReinitHostHighCommandQueue      102
+#define                ReinitHostHighRespQueue         103
+#define                ReinitHostNormRespQueue         104
+#define                ReinitAdapNormCommandQueue      105
+#define                ReinitAdapHighCommandQueue      107
+#define                ReinitAdapHighRespQueue         108
+#define                ReinitAdapNormRespQueue         109
+#define                InterfaceShutdown               110
+#define                DmaCommandFib                   120
+#define                StartProfile                    121
+#define                TermProfile                     122
+#define                SpeedTest                       123
+#define                TakeABreakPt                    124
+#define                RequestPerfData                 125
+#define                SetInterruptDefTimer            126
+#define                SetInterruptDefCount            127
+#define                GetInterruptDefStatus           128
+#define                LastCommCommand                 129
+/*
+ *     Filesystem commands
+ */
+#define                NuFileSystem                    300
+#define                UFS                             301
+#define                HostFileSystem                  302
+#define                LastFileSystemCommand           303
+/*
+ *     Container Commands
+ */
+#define                ContainerCommand                500
+#define                ContainerCommand64              501
+/*
+ *     Cluster Commands
+ */
+#define                ClusterCommand                  550
+/*
+ *     Scsi Port commands (scsi passthrough)
+ */
+#define                ScsiPortCommand                 600
+#define                ScsiPortCommand64               601
+/*
+ *     Misc house keeping and generic adapter initiated commands
+ */
+#define                AifRequest                      700
+#define                CheckRevision                   701
+#define                FsaHostShutdown                 702
+#define                RequestAdapterInfo              703
+#define                IsAdapterPaused                 704
+#define                SendHostTime                    705
+#define                LastMiscCommand                 706
+
+//
+// Commands that will target the failover level on the FSA adapter
+//
+
+enum fib_xfer_state {
+       HostOwned                       = (1<<0),
+       AdapterOwned                    = (1<<1),
+       FibInitialized                  = (1<<2),
+       FibEmpty                        = (1<<3),
+       AllocatedFromPool               = (1<<4),
+       SentFromHost                    = (1<<5),
+       SentFromAdapter                 = (1<<6),
+       ResponseExpected                = (1<<7),
+       NoResponseExpected              = (1<<8),
+       AdapterProcessed                = (1<<9),
+       HostProcessed                   = (1<<10),
+       HighPriority                    = (1<<11),
+       NormalPriority                  = (1<<12),
+       Async                           = (1<<13),
+       AsyncIo                         = (1<<13),      // rpbfix: remove with new regime
+       PageFileIo                      = (1<<14),      // rpbfix: remove with new regime
+       ShutdownRequest                 = (1<<15),
+       LazyWrite                       = (1<<16),      // rpbfix: remove with new regime
+       AdapterMicroFib                 = (1<<17),
+       BIOSFibPath                     = (1<<18),
+       FastResponseCapable             = (1<<19),
+       ApiFib                          = (1<<20)       // Its an API Fib.
+};
+
+/*
+ *     The following defines needs to be updated any time there is an
+ *     incompatible change made to the aac_init structure.
+ */
+
+#define ADAPTER_INIT_STRUCT_REVISION           3
+
+struct aac_init
+{
+       u32     InitStructRevision;
+       u32     MiniPortRevision;
+       u32     fsrev;
+       u32     CommHeaderAddress;
+       u32     FastIoCommAreaAddress;
+       u32     AdapterFibsPhysicalAddress;
+       u32     AdapterFibsVirtualAddress;
+       u32     AdapterFibsSize;
+       u32     AdapterFibAlign;
+       u32     printfbuf;
+       u32     printfbufsiz;
+       u32     HostPhysMemPages;               // number of 4k pages of host physical memory
+       u32     HostElapsedSeconds;             // number of seconds since 1970.
+};
+
+enum aac_log_level {
+       LOG_INIT                        = 10,
+       LOG_INFORMATIONAL               = 20,
+       LOG_WARNING                     = 30,
+       LOG_LOW_ERROR                   = 40,
+       LOG_MEDIUM_ERROR                = 50,
+       LOG_HIGH_ERROR                  = 60,
+       LOG_PANIC                       = 70,
+       LOG_DEBUG                       = 80,
+       LOG_WINDBG_PRINT                = 90
+};
+
+#define FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT      0x030b
+#define FSAFS_NTC_FIB_CONTEXT                  0x030c
+
+struct aac_dev;
+
+struct adapter_ops
+{      /* per-hardware-variant entry points (implemented by sa.c / rx.c in this patch) */
+       void (*adapter_interrupt)(struct aac_dev *dev);         /* raise an interrupt on the adapter */
+       void (*adapter_notify)(struct aac_dev *dev, u32 event); /* notify the adapter of 'event' */
+       void (*adapter_enable_int)(struct aac_dev *dev, u32 event);     /* unmask the given event interrupt */
+       void (*adapter_disable_int)(struct aac_dev *dev, u32 event);    /* mask the given event interrupt */
+       int  (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 *status); /* synchronous (mailbox) command; result in *status */
+};
+
+/*
+ *     Define which interrupt handler needs to be installed
+ */
+
+struct aac_driver_ident
+{
+       u16     vendor;                 /* presumably PCI vendor id to match - confirm against probe code */
+       u16     device;                 /* PCI device id */
+       u16     subsystem_vendor;       /* PCI subsystem vendor id */
+       u16     subsystem_device;       /* PCI subsystem device id */
+       int     (*init)(struct aac_dev *dev, unsigned long num);        /* board-type-specific init hook */
+       char *  name;                   /* driver name */
+       char *  vname;                  /* vendor name */
+       char *  model;                  /* board model string */
+       u16     channels;               /* number of channels on this board */
+};
+
+/*
+ *     The adapter interface specs all queues to be located in the same
+ *     physically contiguous block. The host structure that defines the
+ *     communication queues will assume they are each a separate physically
+ *     contiguous memory region that will support them all being one big
+ *     contiguous block. 
+ *     There is a command and response queue for each level and direction of
+ *     communication. These regions are accessed by both the host and adapter.
+ */
+struct aac_queue {
+       u64                     logical;                /* This is the address we give the adapter */
+       struct aac_entry        *base;                  /* This is the system virtual address */
+       struct aac_qhdr         headers;                /* A pointer to the producer and consumer queue headers for this queue */
+       u32                     entries;                /* Number of queue entries on this queue */
+#if 0
+       wait_queue_head_t       qfull;                  /* Event to wait on if the queue is full */
+       wait_queue_head_t       cmdready;               /* Indicates there is a Command ready from the adapter on this queue. */
+#endif
+                                                       /* This is only valid for adapter to host command queues. */                      
+       spinlock_t              *lock;                  /* Pointer to this queue's spinlock; take it before accessing the queue */
+       spinlock_t              lockdata;               /* Actual lock storage (used only on one side of the lock) */
+       unsigned long           SavedIrql;              /* Previous IRQL when the spin lock is taken */
+       u32                     padding;                /* Padding - FIXME - can remove I believe */
+       struct list_head        cmdq;                   /* A queue of FIBs which need to be processed by the FS thread. This is */
+                                                       /* only valid for command queues which receive entries from the adapter. */
+       struct list_head        pendingq;               /* A queue of outstanding FIBs to the adapter. */
+       unsigned long           numpending;             /* Number of entries on outstanding queue. */
+       struct aac_dev *        dev;                    /* Back pointer to adapter structure */
+};
+
+/*
+ *     Message queues. The order here is important, see also the 
+ *     queue type ordering
+ */
+
+struct aac_queue_block
+{
+       struct aac_queue queue[8];
+};
+
+/*
+ *     SaP1 Message Unit Registers
+ */
+struct sa_drawbridge_CSR {
+                                               //       Offset |       Name
+       u32     reserved[10];                   //      00h-27h |   Reserved
+       u8      LUT_Offset;                     //      28h     |       Lookup Table Offset
+       u8      reserved1[3];                   //      29h-2bh |       Reserved
+       u32     LUT_Data;                       //      2ch     |       Lookup Table Data       
+       u32     reserved2[26];                  //      30h-97h |       Reserved
+       u16     PRICLEARIRQ;                    //      98h     |       Primary Clear Irq
+       u16     SECCLEARIRQ;                    //      9ah     |       Secondary Clear Irq
+       u16     PRISETIRQ;                      //      9ch     |       Primary Set Irq
+       u16     SECSETIRQ;                      //      9eh     |       Secondary Set Irq
+       u16     PRICLEARIRQMASK;                //      a0h     |       Primary Clear Irq Mask
+       u16     SECCLEARIRQMASK;                //      a2h     |       Secondary Clear Irq Mask
+       u16     PRISETIRQMASK;                  //      a4h     |       Primary Set Irq Mask
+       u16     SECSETIRQMASK;                  //      a6h     |       Secondary Set Irq Mask
+       u32     MAILBOX0;                       //      a8h     |       Scratchpad 0
+       u32     MAILBOX1;                       //      ach     |       Scratchpad 1
+       u32     MAILBOX2;                       //      b0h     |       Scratchpad 2
+       u32     MAILBOX3;                       //      b4h     |       Scratchpad 3
+       u32     MAILBOX4;                       //      b8h     |       Scratchpad 4
+       u32     MAILBOX5;                       //      bch     |       Scratchpad 5
+       u32     MAILBOX6;                       //      c0h     |       Scratchpad 6
+       u32     MAILBOX7;                       //      c4h     |       Scratchpad 7
+
+       u32     ROM_Setup_Data;                 //      c8h |   Rom Setup and Data
+       u32     ROM_Control_Addr;               //      cch |   Rom Control and Address
+
+       u32     reserved3[12];                  //      d0h-ffh |       reserved
+       u32     LUT[64];                        // 100h-1ffh|   Lookup Table Entries
+
+       //
+       //  TO DO
+       //      need to add DMA, I2O, UART, etc registers from 80h to 364h
+       //
+
+};
+
+#define Mailbox0       SaDbCSR.MAILBOX0
+#define Mailbox1       SaDbCSR.MAILBOX1
+#define Mailbox2       SaDbCSR.MAILBOX2
+#define Mailbox3       SaDbCSR.MAILBOX3
+#define Mailbox4       SaDbCSR.MAILBOX4
+#define Mailbox5       SaDbCSR.MAILBOX5
+#define Mailbox7       SaDbCSR.MAILBOX7
+       
+#define DoorbellReg_p SaDbCSR.PRISETIRQ
+#define DoorbellReg_s SaDbCSR.SECSETIRQ
+#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
+
+
+#define        DOORBELL_0      cpu_to_le16(0x0001)
+#define DOORBELL_1     cpu_to_le16(0x0002)
+#define DOORBELL_2     cpu_to_le16(0x0004)
+#define DOORBELL_3     cpu_to_le16(0x0008)
+#define DOORBELL_4     cpu_to_le16(0x0010)
+#define DOORBELL_5     cpu_to_le16(0x0020)
+#define DOORBELL_6     cpu_to_le16(0x0040)
+
+       
+#define PrintfReady    DOORBELL_5
+#define PrintfDone     DOORBELL_5
+       
+struct sa_registers {
+       struct sa_drawbridge_CSR        SaDbCSR;                        /* 98h - c4h */
+};
+       
+
+#define Sa_MINIPORT_REVISION                   1
+
+#define sa_readw(AEP, CSR)             readl(&((AEP)->regs.sa->CSR))
+#define sa_readl(AEP,  CSR)            readl(&((AEP)->regs.sa->CSR))
+#define sa_writew(AEP, CSR, value)     writew(value, &((AEP)->regs.sa->CSR))
+#define sa_writel(AEP, CSR, value)     writel(value, &((AEP)->regs.sa->CSR))
+
+/*
+ *     Rx Message Unit Registers (memory-mapped; local vs. PCI offsets per row)
+ */
+
+struct rx_mu_registers {
+                                               //       Local  |   PCI*        |       Name
+                                               //                      |               |
+       u32     ARSR;                           //      1300h   |       00h     |       APIC Register Select Register
+       u32     reserved0;                      //      1304h   |       04h     |       Reserved
+       u32     AWR;                            //      1308h   |       08h     |       APIC Window Register
+       u32     reserved1;                      //      130Ch   |       0Ch     |       Reserved
+       u32     IMRx[2];                        //      1310h   |       10h     |       Inbound Message Registers
+       u32     OMRx[2];                        //      1318h   |       18h     |       Outbound Message Registers
+       u32     IDR;                            //      1320h   |       20h     |       Inbound Doorbell Register
+       u32     IISR;                           //      1324h   |       24h     |       Inbound Interrupt Status Register
+       u32     IIMR;                           //      1328h   |       28h     |       Inbound Interrupt Mask Register
+       u32     ODR;                            //      132Ch   |       2Ch     |       Outbound Doorbell Register
+       u32     OISR;                           //      1330h   |       30h     |       Outbound Interrupt Status Register
+       u32     OIMR;                           //      1334h   |       34h     |       Outbound Interrupt Mask Register
+                                               // * Must access through ATU Inbound Translation Window
+};
+
+struct rx_inbound {
+       u32     Mailbox[8];             // inbound mailbox window (accessed via the InboundMailbox* macros)
+};
+
+#define        InboundMailbox0         IndexRegs.Mailbox[0]    /* these expand against struct rx_registers.IndexRegs */
+#define        InboundMailbox1         IndexRegs.Mailbox[1]
+#define        InboundMailbox2         IndexRegs.Mailbox[2]
+#define        InboundMailbox3         IndexRegs.Mailbox[3]
+#define        InboundMailbox4         IndexRegs.Mailbox[4]
+
+#define        INBOUNDDOORBELL_0       cpu_to_le32(0x00000001) /* doorbell bit masks, stored little-endian */
+#define INBOUNDDOORBELL_1      cpu_to_le32(0x00000002)
+#define INBOUNDDOORBELL_2      cpu_to_le32(0x00000004)
+#define INBOUNDDOORBELL_3      cpu_to_le32(0x00000008)
+#define INBOUNDDOORBELL_4      cpu_to_le32(0x00000010)
+#define INBOUNDDOORBELL_5      cpu_to_le32(0x00000020)
+#define INBOUNDDOORBELL_6      cpu_to_le32(0x00000040)
+
+#define        OUTBOUNDDOORBELL_0      cpu_to_le32(0x00000001) /* stored little-endian */
+#define OUTBOUNDDOORBELL_1     cpu_to_le32(0x00000002)
+#define OUTBOUNDDOORBELL_2     cpu_to_le32(0x00000004)
+#define OUTBOUNDDOORBELL_3     cpu_to_le32(0x00000008)
+#define OUTBOUNDDOORBELL_4     cpu_to_le32(0x00000010)
+
+#define InboundDoorbellReg     MUnit.IDR       /* these expand against struct rx_registers.MUnit */
+#define OutboundDoorbellReg    MUnit.ODR
+
+struct rx_registers {
+       struct rx_mu_registers          MUnit;          // 1300h - 1334h
+       u32                             reserved1[6];   // 1338h - 134ch
+       struct rx_inbound               IndexRegs;      // 1350h onward: inbound mailboxes
+};
+
+#define rx_readb(AEP, CSR)             readb(&((AEP)->regs.rx->CSR))   /* MMIO accessors for rx_registers fields */
+#define rx_readl(AEP, CSR)             readl(&((AEP)->regs.rx->CSR))
+#define rx_writeb(AEP, CSR, value)     writeb(value, &((AEP)->regs.rx->CSR))
+#define rx_writel(AEP, CSR, value)     writel(value, &((AEP)->regs.rx->CSR))
+
+struct fib;
+
+typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);  /* completion callback; ctxt is the caller-supplied context (see fib_send) */
+
+struct aac_fib_context {       /* tracks adapter FIBs delivered to one registered user thread (see aac_dev.fib_list) */
+       s16                     type;           // used for verification of structure   
+       s16                     size;
+       ulong                   jiffies;        // used for cleanup - dmb changed to ulong
+       struct list_head        next;           // used to link contexts into a linked list
+#if 0
+       struct semaphore        wait_sem;       // this is used to wait for the next fib to arrive.
+#endif
+       int                     wait;           // Set to true when thread is in WaitForSingleObject
+       unsigned long           count;          // total number of FIBs on FibList
+       struct list_head        fibs;
+};
+
+struct fsa_scsi_hba {
+       u32             size[MAXIMUM_NUM_CONTAINERS];   // per-container size (units not shown here -- confirm)
+       u32             type[MAXIMUM_NUM_CONTAINERS];   // per-container volume type
+       u8              valid[MAXIMUM_NUM_CONTAINERS];  // slot-in-use flag (cf. aac_query_disk.valid)
+       u8              ro[MAXIMUM_NUM_CONTAINERS];     // read-only flag
+       u8              locked[MAXIMUM_NUM_CONTAINERS]; // locked flag (cf. aac_query_disk.locked)
+       u8              deleted[MAXIMUM_NUM_CONTAINERS];        // deleted flag (cf. aac_query_disk.deleted)
+       u32             devno[MAXIMUM_NUM_CONTAINERS];  // device-number mapping
+};
+
+struct fib {
+       void                    *next;  /* this is used by the allocator */
+       s16                     type;   /* structure-verification tag (cf. aac_fib_context.type) */
+       s16                     size;
+       /*
+        *      The Adapter that this I/O is destined for.
+        */
+       struct aac_dev          *dev;
+       u64                     logicaladdr;    /* 64 bit */
+#if 0
+       /*
+        *      This is the event the sendfib routine will wait on if the
+        *      caller did not pass one and this is synch io.
+        */
+       struct semaphore        event_wait;
+#endif
+       spinlock_t              event_lock;     /* NOTE(review): presumably guards completion ('done'/callback) -- confirm */
+
+       u32                     done;   /* gets set to 1 when fib is complete */
+       fib_callback            callback;
+       void                    *callback_data;
+       u32                     flags; // u32 dmb was ulong
+       /*
+        *      The following is used to put this fib context onto the 
+        *      Outstanding I/O queue.
+        */
+       struct list_head        queue;
+
+       void                    *data;
+       struct hw_fib           *fib;           /* Actual shared object */
+};
+
+/*
+ *     Adapter Information Block
+ *
+ *     This is returned by the RequestAdapterInfo block
+ */
+struct aac_adapter_info
+{
+       u32     platform;
+       u32     cpu;            // AAC_CPU_* below
+       u32     subcpu;
+       u32     clock;
+       u32     execmem;
+       u32     buffermem;
+       u32     totalmem;
+       u32     kernelrev;
+       u32     kernelbuild;
+       u32     monitorrev;
+       u32     monitorbuild;
+       u32     hwrev;
+       u32     hwbuild;
+       u32     biosrev;
+       u32     biosbuild;
+       u32     cluster;
+       u32     serial[2];
+       u32     battery;        // AAC_BAT_* below
+       u32     options;        // bit mask of AAC_OPT_* below
+       u32     OEM;
+};
+
+/*
+ * Battery platforms
+ */
+#define AAC_BAT_REQ_PRESENT    (1)
+#define AAC_BAT_REQ_NOTPRESENT (2)
+#define AAC_BAT_OPT_PRESENT    (3)
+#define AAC_BAT_OPT_NOTPRESENT (4)
+#define AAC_BAT_NOT_SUPPORTED  (5)
+/*
+ * cpu types
+ */
+#define AAC_CPU_SIMULATOR      (1)
+#define AAC_CPU_I960           (2)
+#define AAC_CPU_STRONGARM      (3)
+
+/*
+ * Supported Options
+ */
+#define AAC_OPT_SNAPSHOT       cpu_to_le32(1)
+#define AAC_OPT_CLUSTERS       cpu_to_le32(1<<1)
+#define AAC_OPT_WRITE_CACHE    cpu_to_le32(1<<2)
+#define AAC_OPT_64BIT_DATA     cpu_to_le32(1<<3)
+#define AAC_OPT_HOST_TIME_FIB  cpu_to_le32(1<<4)
+#define AAC_OPT_RAID50         cpu_to_le32(1<<5)
+#define AAC_OPT_4GB_WINDOW     cpu_to_le32(1<<6)
+#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
+#define AAC_OPT_SOFT_ERR_REPORT        cpu_to_le32(1<<8)
+#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
+#define AAC_OPT_SGMAP_HOST64   cpu_to_le32(1<<10)
+#define AAC_OPT_ALARM          cpu_to_le32(1<<11)
+#define AAC_OPT_NONDASD                cpu_to_le32(1<<12)
+
+struct aac_dev
+{
+       struct aac_dev          *next;
+       const char              *name;
+       int                     id;
+
+       u16                     irq_mask;
+       /*
+        *      Map for 128 fib objects (64k)
+        */     
+       dma_addr_t              hw_fib_pa;
+       struct hw_fib           *hw_fib_va;
+#if BITS_PER_LONG >= 64
+       ulong                   fib_base_va;    /* virtual base of the fib area (64-bit builds only) */
+#endif
+       /*
+        *      Fib Headers
+        */
+       struct fib              fibs[AAC_NUM_FIB];
+       struct fib              *free_fib;
+       struct fib              *timeout_fib;
+       spinlock_t              fib_lock;
+       
+       struct aac_queue_block *queues;
+       /*
+        *      The user API will use an IOCTL to register itself to receive
+        *      FIBs from the adapter.  The following list is used to keep
+        *      track of all the threads that have requested these FIBs.  The
+        *      mutex is used to synchronize access to all data associated 
+        *      with the adapter fibs.
+        */
+       struct list_head        fib_list;
+
+       struct adapter_ops      a_ops;
+       unsigned long           fsrev;          /* Main driver's revision number */
+       
+       struct aac_init         *init;          /* Holds initialization info to communicate with adapter */
+       dma_addr_t              init_pa;        /* Holds physical address of the init struct */
+       
+       struct pci_dev          *pdev;          /* Our PCI interface */
+       void *                  printfbuf;      /* pointer to buffer used for printf's from the adapter */
+       void *                  comm_addr;      /* Base address of Comm area */
+       dma_addr_t              comm_phys;      /* Physical Address of Comm area */
+       size_t                  comm_size;
+
+       struct Scsi_Host        *scsi_host_ptr;
+       struct fsa_scsi_hba     fsa_dev;        /* per-container state (see struct fsa_scsi_hba) */
+       int                     thread_pid;
+       int                     cardtype;
+       
+       /*
+        *      The following is the device specific extension.
+        */
+       union
+       {
+               struct sa_registers *sa;
+               struct rx_registers *rx;
+       } regs;
+       /*
+        *      The following is the number of the individual adapter
+        */
+       u32                     devnum;
+       u32                     aif_thread;
+#if 0
+       struct completion       aif_completion;
+#endif
+       struct aac_adapter_info adapter_info;
+       /* These are in adapter info but they are in the io flow so
+        * lets break them out so we don't have to do an AND to check them
+        */
+       u8                      nondasd_support; 
+       u8                      pae_support;
+};
+
+#define aac_adapter_interrupt(dev) \
+       dev->a_ops.adapter_interrupt(dev)       /* dispatch through the per-card adapter_ops vtable */
+
+#define aac_adapter_notify(dev, event) \
+       dev->a_ops.adapter_notify(dev, event)
+
+#define aac_adapter_enable_int(dev, event) \
+       dev->a_ops.adapter_enable_int(dev, event)
+
+#define aac_adapter_disable_int(dev, event) \
+       dev->a_ops.adapter_disable_int(dev, event)
+
+
+
+#define FIB_CONTEXT_FLAG_TIMED_OUT             (0x00000001)    /* set in struct fib.flags -- confirm usage */
+
+/*
+ *     Define the command values (FSA command codes; keep MAX_FSACOMMAND_NUM one past the highest)
+ */
+#define                Null                    0
+#define        GetAttributes           1
+#define        SetAttributes           2
+#define        Lookup                  3
+#define        ReadLink                4
+#define        Read                    5
+#define        Write                   6
+#define                Create                  7
+#define                MakeDirectory           8
+#define                SymbolicLink            9
+#define                MakeNode                10
+#define                Removex                 11
+#define                RemoveDirectoryx        12
+#define                Rename                  13
+#define                Link                    14
+#define                ReadDirectory           15
+#define                ReadDirectoryPlus       16
+#define                FileSystemStatus        17
+#define                FileSystemInfo          18
+#define                PathConfigure           19
+#define                Commit                  20
+#define                Mount                   21
+#define                UnMount                 22
+#define                Newfs                   23
+#define                FsCheck                 24
+#define                FsSync                  25
+#define                SimReadWrite            26
+#define                SetFileSystemStatus     27
+#define                BlockRead               28
+#define                BlockWrite              29
+#define                NvramIoctl              30
+#define                FsSyncWait              31
+#define                ClearArchiveBit         32
+#define                SetAcl                  33
+#define                GetAcl                  34
+#define                AssignAcl               35
+#define                FaultInsertion          36      /* Fault Insertion Command */
+#define                CrazyCache              37      /* Crazycache */
+
+#define                MAX_FSACOMMAND_NUM      38
+
+
+/*
+ *     Define the status returns. These are very unixlike although
+ *     most are not in fact used
+ */
+
+#define                ST_OK           0
+#define                ST_PERM         1
+#define                ST_NOENT        2
+#define                ST_IO           5
+#define                ST_NXIO         6
+#define                ST_E2BIG        7
+#define                ST_ACCES        13
+#define                ST_EXIST        17
+#define                ST_XDEV         18
+#define                ST_NODEV        19
+#define                ST_NOTDIR       20
+#define                ST_ISDIR        21
+#define                ST_INVAL        22
+#define                ST_FBIG         27
+#define                ST_NOSPC        28
+#define                ST_ROFS         30
+#define                ST_MLINK        31
+#define                ST_WOULDBLOCK   35
+#define                ST_NAMETOOLONG  63
+#define                ST_NOTEMPTY     66
+#define                ST_DQUOT        69
+#define                ST_STALE        70
+#define                ST_REMOTE       71
+#define                ST_BADHANDLE    10001
+#define                ST_NOT_SYNC     10002
+#define                ST_BAD_COOKIE   10003
+#define                ST_NOTSUPP      10004
+#define                ST_TOOSMALL     10005
+#define                ST_SERVERFAULT  10006
+#define                ST_BADTYPE      10007
+#define                ST_JUKEBOX      10008
+#define                ST_NOTMOUNTED   10009
+#define                ST_MAINTMODE    10010
+#define                ST_STALEACL     10011
+
+/*
+ *     On writes how does the client want the data written.
+ */
+
+#define        CACHE_CSTABLE           1
+#define CACHE_UNSTABLE         2
+
+/*
+ *     Lets the client know at which level the data was committed on
+ *     a write request
+ */
+
+#define        CMFILE_SYNCH_NVRAM      1
+#define        CMDATA_SYNCH_NVRAM      2
+#define        CMFILE_SYNCH            3
+#define CMDATA_SYNCH           4
+#define CMUNSTABLE             5
+
+struct aac_read
+{
+       u32             command;
+       u32             cid;            // container id -- confirm
+       u32             block;
+       u32             count;
+       struct sgmap    sg;     // Must be last in struct because it is variable
+};
+
+struct aac_read64
+{
+       u32             command;
+       u16             cid;            // container id -- confirm
+       u16             sector_count;
+       u32             block;
+       u16             pad;
+       u16             flags;
+       struct sgmap64  sg;     // Must be last in struct because it is variable
+};
+
+struct aac_read_reply
+{
+       u32             status;         // ST_* status code (see above) -- confirm
+       u32             count;
+};
+
+struct aac_write
+{
+       u32             command;
+       u32             cid;            // container id -- confirm
+       u32             block;
+       u32             count;
+       u32             stable; // Not used
+       struct sgmap    sg;     // Must be last in struct because it is variable
+};
+
+struct aac_write64
+{
+       u32             command;
+       u16             cid;            // container id -- confirm
+       u16             sector_count;
+       u32             block;
+       u16             pad;
+       u16             flags;
+       struct sgmap64  sg;     // Must be last in struct because it is variable
+};
+struct aac_write_reply
+{
+       u32             status;         // ST_* status code (see above) -- confirm
+       u32             count;
+       u32             committed;      // CM* commit level (see CMFILE_SYNCH_NVRAM etc. above)
+};
+
+struct aac_srb
+{
+       u32             function;       // SRBF_* (see below)
+       u32             channel;
+       u32             target;
+       u32             lun;
+       u32             timeout;
+       u32             flags;          // SRB_* flags (see below)
+       u32             count;          // Data xfer size
+       u32             retry_limit;
+       u32             cdb_size;
+       u8              cdb[16];        // SCSI CDB; cdb_size bytes valid
+       struct  sgmap   sg;
+};
+
+
+
+#define                AAC_SENSE_BUFFERSIZE     30
+
+struct aac_srb_reply
+{
+       u32             status;         // ST_* status -- confirm
+       u32             srb_status;     // SRB_STATUS_* (see below)
+       u32             scsi_status;
+       u32             data_xfer_length;
+       u32             sense_data_size;
+       u8              sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
+};
+/*
+ * SRB Flags
+ */
+#define                SRB_NoDataXfer           0x0000
+#define                SRB_DisableDisconnect    0x0004
+#define                SRB_DisableSynchTransfer 0x0008
+#define        SRB_BypassFrozenQueue    0x0010
+#define                SRB_DisableAutosense     0x0020
+#define                SRB_DataIn               0x0040
+#define        SRB_DataOut              0x0080
+
+/*
+ * SRB Functions - set in aac_srb->function
+ */
+#define        SRBF_ExecuteScsi        0x0000
+#define        SRBF_ClaimDevice        0x0001
+#define        SRBF_IO_Control         0x0002
+#define        SRBF_ReceiveEvent       0x0003
+#define        SRBF_ReleaseQueue       0x0004
+#define        SRBF_AttachDevice       0x0005
+#define        SRBF_ReleaseDevice      0x0006
+#define        SRBF_Shutdown           0x0007
+#define        SRBF_Flush              0x0008
+#define        SRBF_AbortCommand       0x0010
+#define        SRBF_ReleaseRecovery    0x0011
+#define        SRBF_ResetBus           0x0012
+#define        SRBF_ResetDevice        0x0013
+#define        SRBF_TerminateIO        0x0014
+#define        SRBF_FlushQueue         0x0015
+#define        SRBF_RemoveDevice       0x0016
+#define        SRBF_DomainValidation   0x0017
+
+/* 
+ * SRB SCSI Status - set in aac_srb->scsi_status
+ */
+#define SRB_STATUS_PENDING                  0x00        /* NOTE(review): values appear to mirror NT SRB_STATUS_* codes -- confirm */
+#define SRB_STATUS_SUCCESS                  0x01
+#define SRB_STATUS_ABORTED                  0x02
+#define SRB_STATUS_ABORT_FAILED             0x03
+#define SRB_STATUS_ERROR                    0x04
+#define SRB_STATUS_BUSY                     0x05
+#define SRB_STATUS_INVALID_REQUEST          0x06
+#define SRB_STATUS_INVALID_PATH_ID          0x07
+#define SRB_STATUS_NO_DEVICE                0x08
+#define SRB_STATUS_TIMEOUT                  0x09
+#define SRB_STATUS_SELECTION_TIMEOUT        0x0A
+#define SRB_STATUS_COMMAND_TIMEOUT          0x0B
+#define SRB_STATUS_MESSAGE_REJECTED         0x0D
+#define SRB_STATUS_BUS_RESET                0x0E
+#define SRB_STATUS_PARITY_ERROR             0x0F
+#define SRB_STATUS_REQUEST_SENSE_FAILED     0x10
+#define SRB_STATUS_NO_HBA                   0x11
+#define SRB_STATUS_DATA_OVERRUN             0x12
+#define SRB_STATUS_UNEXPECTED_BUS_FREE      0x13
+#define SRB_STATUS_PHASE_SEQUENCE_FAILURE   0x14
+#define SRB_STATUS_BAD_SRB_BLOCK_LENGTH     0x15
+#define SRB_STATUS_REQUEST_FLUSHED          0x16
+#define SRB_STATUS_DELAYED_RETRY           0x17
+#define SRB_STATUS_INVALID_LUN              0x20
+#define SRB_STATUS_INVALID_TARGET_ID        0x21
+#define SRB_STATUS_BAD_FUNCTION             0x22
+#define SRB_STATUS_ERROR_RECOVERY           0x23
+#define SRB_STATUS_NOT_STARTED             0x24
+#define SRB_STATUS_NOT_IN_USE              0x30
+#define SRB_STATUS_FORCE_ABORT             0x31
+#define SRB_STATUS_DOMAIN_VALIDATION_FAIL   0x32
+
+/*
+ * Object-Server / Volume-Manager Dispatch Classes (VM_* request opcodes)
+ */
+
+#define                VM_Null                 0
+#define                VM_NameServe            1
+#define                VM_ContainerConfig      2
+#define                VM_Ioctl                3
+#define                VM_FilesystemIoctl      4
+#define                VM_CloseAll             5
+#define                VM_CtBlockRead          6
+#define                VM_CtBlockWrite         7
+#define                VM_SliceBlockRead       8       /* raw access to configured "storage objects" */
+#define                VM_SliceBlockWrite      9
+#define                VM_DriveBlockRead       10      /* raw access to physical devices */
+#define                VM_DriveBlockWrite      11
+#define                VM_EnclosureMgt         12      /* enclosure management */
+#define                VM_Unused               13      /* used to be diskset management */
+#define                VM_CtBlockVerify        14
+#define                VM_CtPerf               15      /* performance test */
+#define                VM_CtBlockRead64        16
+#define                VM_CtBlockWrite64       17
+#define                VM_CtBlockVerify64      18
+#define                VM_CtHostRead64         19
+#define                VM_CtHostWrite64        20
+
+#define                MAX_VMCOMMAND_NUM       21      /* used for sizing stats array - leave last */
+
+/*
+ *     Descriptive information (eg, vital stats)
+ *     that a content manager might report.  The
+ *     FileArray filesystem component is one example
+ *     of a content manager.  Raw mode might be
+ *     another.
+ */
+
+struct aac_fsinfo {
+       u32  fsTotalSize;       /* Consumed by fs, incl. metadata */
+       u32  fsBlockSize;       /* filesystem block size (units not shown here -- confirm bytes) */
+       u32  fsFragSize;
+       u32  fsMaxExtendSize;
+       u32  fsSpaceUnits;
+       u32  fsMaxNumFiles;
+       u32  fsNumFreeFiles;
+       u32  fsInodeDensity;
+};     /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+
+union aac_contentinfo {        /* per-ObjType content-manager info */
+       struct aac_fsinfo filesys;      /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+};
+
+/*
+ *     Query for "mountable" objects, ie, objects that are typically
+ *     associated with a drive letter on the client (host) side.
+ */
+
+struct aac_mntent {
+       u32                     oid;            // object id
+       u8                      name[16];       // if applicable
+       struct creation_info    create_info;    // if applicable
+       u32                     capacity;       // units not shown here -- confirm (blocks?)
+       u32                     vol;            // substrate structure
+       u32                     obj;            // FT_FILESYS, FT_DATABASE, etc.
+       u32                     state;          // unready for mounting, readonly, etc.
+       union aac_contentinfo   fileinfo;       // Info specific to content manager (eg, filesystem)
+       u32                     altoid;         // != oid <==> snapshot or broken mirror exists
+};
+
+#define FSCS_READONLY  0x0002  /*      possible result of broken mirror */
+
+struct aac_query_mount {
+       u32             command;
+       u32             type;           // FT_* object type requested -- confirm
+       u32             count;
+};
+
+struct aac_mount {
+       u32             status;
+       u32             type;           /* should be same as that requested */
+       u32             count;
+       struct aac_mntent mnt[1];       /* variable length: presumably 'count' entries -- confirm */
+};
+
+/*
+ * The following command is sent to shut down each container.
+ */
+
+struct aac_close {
+       u32     command;
+       u32     cid;            // container id to shut down
+};
+
+struct aac_query_disk
+{
+       s32     cnum;           // container number
+       s32     bus;
+       s32     target;
+       s32     lun;
+       u32     valid;          // cf. fsa_scsi_hba valid/locked/deleted flags
+       u32     locked;
+       u32     deleted;
+       s32     instance;
+       s8      name[10];       // device name
+       u32     unmapped;
+};
+
+struct aac_delete_disk {
+       u32     disknum;
+       u32     cnum;           // container number (cf. aac_query_disk.cnum)
+};
+
+struct fib_ioctl
+{
+       char    *fibctx;        /* NOTE(review): ioctl argument block; presumably user-space pointers -- confirm */
+       int     wait;
+       char    *fib;
+};
+
+struct revision        /* revision triple (compat/version/build) */
+{
+       u32 compat;
+       u32 version;
+       u32 build;
+};
+       
+/*
+ *     Ugly - non Linux like ioctl coding for back compat.
+ */
+
+#define CTL_CODE(function, method) (                 \
+    (4<< 16) | ((function) << 2) | (method) \
+)
+
+/*
+ *     Define the method codes for how buffers are passed for I/O and FS 
+ *     controls
+ */
+
+#define METHOD_BUFFERED                 0
+#define METHOD_NEITHER                  3
+
+/*
+ *     Filesystem ioctls
+ */
+
+#define FSACTL_SENDFIB                         CTL_CODE(2050, METHOD_BUFFERED)
+#define FSACTL_SEND_RAW_SRB                    CTL_CODE(2067, METHOD_BUFFERED)
+#define FSACTL_DELETE_DISK                     0x163   /* NOTE(review): raw value, not CTL_CODE() -- confirm back-compat intent */
+#define FSACTL_QUERY_DISK                      0x173
+#define FSACTL_OPEN_GET_ADAPTER_FIB            CTL_CODE(2100, METHOD_BUFFERED)
+#define FSACTL_GET_NEXT_ADAPTER_FIB            CTL_CODE(2101, METHOD_BUFFERED)
+#define FSACTL_CLOSE_GET_ADAPTER_FIB           CTL_CODE(2102, METHOD_BUFFERED)
+#define FSACTL_MINIPORT_REV_CHECK               CTL_CODE(2107, METHOD_BUFFERED)
+#define FSACTL_GET_PCI_INFO                    CTL_CODE(2119, METHOD_BUFFERED)
+#define FSACTL_FORCE_DELETE_DISK               CTL_CODE(2120, METHOD_NEITHER)
+
+
+struct aac_common
+{
+       /*
+        *      If this value is set to 1 then interrupt moderation will occur 
+        *      in the base communication support.
+        */
+       u32 irq_mod;
+       u32 peak_fibs;
+       u32 zero_fibs;
+       u32 fib_timeouts;
+       /*
+        *      Statistical counters in debug mode
+        */
+#ifdef DBG
+       u32 FibsSent;
+       u32 FibRecved;
+       u32 NoResponseSent;
+       u32 NoResponseRecved;
+       u32 AsyncSent;
+       u32 AsyncRecved;
+       u32 NormalSent;
+       u32 NormalRecved;
+#endif
+};
+
+extern struct aac_common aac_config;
+
+
+/*
+ *     The following macro is used when sending and receiving FIBs. It is
+ *     only used for debugging.
+ */
+#if DBG /* NOTE(review): '#if DBG' here vs '#ifdef DBG' around the counters above -- these differ if DBG is defined empty; confirm */
+#define        FIB_COUNTER_INCREMENT(counter)          (counter)++
+#else
+#define        FIB_COUNTER_INCREMENT(counter)          
+#endif
+
+/*
+ *     Adapter direct commands (codes passed straight to the adapter,
+ *     Monitor/Kernel API; stored little-endian)
+ */
+
+#define        BREAKPOINT_REQUEST              cpu_to_le32(0x00000004)
+#define        INIT_STRUCT_BASE_ADDRESS        cpu_to_le32(0x00000005)
+#define READ_PERMANENT_PARAMETERS      cpu_to_le32(0x0000000a)
+#define WRITE_PERMANENT_PARAMETERS     cpu_to_le32(0x0000000b)
+#define HOST_CRASHING                  cpu_to_le32(0x0000000d)
+#define        SEND_SYNCHRONOUS_FIB            cpu_to_le32(0x0000000c)
+#define GET_ADAPTER_PROPERTIES         cpu_to_le32(0x00000019)
+#define RE_INIT_ADAPTER                        cpu_to_le32(0x000000ee)
+
+/*
+ *     Adapter Status Register
+ *
+ *  Phase Status mailbox is 32bits:
+ *     <31:16> = Phase Status
+ *     <15:0>  = Phase
+ *
+ *     The adapter reports its present state through the phase.  Only
+ *     a single phase should ever be set.  Each phase can have multiple
+ *     phase status bits to provide more detailed information about the 
+ *     state of the board.  Care should be taken to ensure that any phase 
+ *     status bits that are set when changing the phase are also valid
+ *     for the new phase or be cleared out.  Adapter software (monitor,
+ *     iflash, kernel) is responsible for properly maintaining the phase 
+ *     status mailbox when it is running.
+ *                                                                                     
+ *     MONKER_API Phases                                                       
+ *
+ *     Phases are bit oriented.  It is NOT valid  to have multiple bits set                                            
+ */                                    
+
+#define        SELF_TEST_FAILED                cpu_to_le32(0x00000004)
+#define        KERNEL_UP_AND_RUNNING           cpu_to_le32(0x00000080)
+#define        KERNEL_PANIC                    cpu_to_le32(0x00000100)
+
+/*
+ *     Doorbell bit defines
+ */
+
+#define DoorBellPrintfDone             cpu_to_le32(1<<5)       // Host -> Adapter
+#define DoorBellAdapterNormCmdReady    cpu_to_le32(1<<1)       // Adapter -> Host
+#define DoorBellAdapterNormRespReady   cpu_to_le32(1<<2)       // Adapter -> Host
+#define DoorBellAdapterNormCmdNotFull  cpu_to_le32(1<<3)       // Adapter -> Host
+#define DoorBellAdapterNormRespNotFull cpu_to_le32(1<<4)       // Adapter -> Host
+#define DoorBellPrintfReady            cpu_to_le32(1<<5)       // Adapter -> Host (same bit as DoorBellPrintfDone, opposite direction)
+
+/*
+ *     For FIB communication, we need all of the following things
+ *     to send back to the user.
+ */
+#define        AifCmdEventNotify       1       /* Notify of event (AIF = Adapter Initiated FIB) */
+#define                AifCmdJobProgress       2       /* Progress report */
+#define                AifCmdAPIReport         3       /* Report from other user of API */
+#define                AifCmdDriverNotify      4       /* Notify host driver of event */
+#define                AifReqJobList           100     /* Gets back complete job list */
+#define                AifReqJobsForCtr        101     /* Gets back jobs for specific container */
+#define                AifReqJobsForScsi       102     /* Gets back jobs for specific SCSI device */ 
+#define                AifReqJobReport         103     /* Gets back a specific job report or list of them */ 
+#define                AifReqTerminateJob      104     /* Terminates job */
+#define                AifReqSuspendJob        105     /* Suspends a job */
+#define                AifReqResumeJob         106     /* Resumes a job */ 
+#define                AifReqSendAPIReport     107     /* API generic report requests */
+#define                AifReqAPIJobStart       108     /* Start a job from the API */
+#define                AifReqAPIJobUpdate      109     /* Update a job report from the API */
+#define                AifReqAPIJobFinish      110     /* Finish a job from the API */
+
+/*
+ *     Adapter Initiated FIB command structures. Start with the adapter
+ *     initiated FIBs that really come from the adapter, and get responded
+ *     to by the host.
+ */
+
+struct aac_aifcmd {    /* header of an adapter-initiated FIB payload */
+       u32 command;            /* Tell host what type of notify this is */
+       u32 seqnum;             /* To allow ordering of reports (if necessary) */
+       u8 data[1];             /* Undefined length (from kernel viewpoint) */
+};
+
+static inline u32 fib2addr(struct hw_fib *hw)
+{
+       return (u32)hw;         /* NOTE(review): pointer truncated to 32 bits -- only valid if fibs are mapped below 4GB on 64-bit builds; confirm (cf. fib_base_va) */
+}
+
+static inline struct hw_fib *addr2fib(u32 addr)
+{
+       return (struct hw_fib *)addr;   /* NOTE(review): inverse of fib2addr; same sub-4GB assumption on 64-bit builds -- confirm */
+}
+
+const char *aac_driverinfo(struct Scsi_Host *);
+struct fib *fib_alloc(struct aac_dev *dev);
+int fib_setup(struct aac_dev *dev);
+void fib_map_free(struct aac_dev *dev);
+void fib_free(struct fib * context);
+void fib_init(struct fib * context);
+void fib_dealloc(struct fib * context);
+void aac_printf(struct aac_dev *dev, u32 val);
+int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
+int aac_consumer_avail(struct aac_dev * dev, struct aac_queue * q);
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
+int fib_complete(struct fib * context);
+#define fib_data(fibctx) ((void *)(fibctx)->fib->data)  /* payload area of the underlying hw fib */
+int aac_detach(struct aac_dev *dev);
+struct aac_dev *aac_init_adapter(struct aac_dev *dev);
+int aac_get_containers(struct aac_dev *dev);
+int aac_scsi_cmd(Scsi_Cmnd *scsi_cmnd_ptr);
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg);
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void *arg);
+int aac_rx_init(struct aac_dev *dev, unsigned long devNumber);
+int aac_sa_init(struct aac_dev *dev, unsigned long devNumber);
+unsigned int aac_response_normal(struct aac_queue * q);
+unsigned int aac_command_normal(struct aac_queue * q);
+#ifdef TRY_SOFTIRQ
+int aac_command_thread(struct softirq_action *h); 
+#else
+int aac_command_thread(struct aac_dev * dev);
+#endif
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
+int fib_adapter_complete(struct fib * fibptr, unsigned short size);
+struct aac_driver_ident* aac_get_driver_ident(int devtype);
+int aac_get_adapter_info(struct aac_dev* dev);
diff --git a/xen-2.4.16/drivers/scsi/aacraid/commctrl.c b/xen-2.4.16/drivers/scsi/aacraid/commctrl.c
new file mode 100644 (file)
index 0000000..15b6a62
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  commctrl.c
+ *
+ * Abstract: Contains all routines for control of the AFA comm layer
+ *
+ */
+
+#include <xeno/config.h>
+/*  #include <xeno/kernel.h> */
+#include <xeno/init.h>
+#include <xeno/types.h>
+#include <xeno/sched.h>
+#include <xeno/pci.h>
+/*  #include <xeno/spinlock.h> */
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/completion.h> */
+#include <xeno/blk.h>
+/*  #include <asm/semaphore.h> */
+#include <asm/uaccess.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+/**
+ *     ioctl_send_fib  -       send a FIB from userspace
+ *     @dev:   adapter is being processed
+ *     @arg:   user pointer holding the FIB to send
+ *     
+ *     This routine sends a fib to the adapter on behalf of a user level
+ *     program and copies the adapter's reply back out.  Every length
+ *     used for the user copies is validated against sizeof(struct
+ *     hw_fib) first, so a corrupt or malicious Size field cannot make
+ *     copy_{from,to}_user() run off the end of the kernel fib buffer.
+ *
+ *     Returns 0 on success, -ENOMEM/-EFAULT/-EINVAL on failure.
+ */
+static int ioctl_send_fib(struct aac_dev * dev, void *arg)
+{
+       struct hw_fib * kfib;
+       struct fib *fibptr;
+
+       fibptr = fib_alloc(dev);
+       if(fibptr == NULL)
+               return -ENOMEM;
+               
+       kfib = fibptr->fib;
+       /*
+        *      First copy in the header so that we can check the size field.
+        */
+       if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
+               fib_free(fibptr);
+               return -EFAULT;
+       }
+       /*
+        *      Since we copy based on the fib header size, make sure that we
+        *      will not overrun the buffer when we copy the memory. Return
+        *      an error if we would.
+        */
+       if(le32_to_cpu(kfib->header.Size) > sizeof(struct hw_fib) - sizeof(struct aac_fibhdr)) {
+               fib_free(fibptr);
+               return -EINVAL;
+       }
+
+       if (copy_from_user((void *) kfib, arg, le32_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr))) {
+               fib_free(fibptr);
+               return -EFAULT;
+       }
+
+       if (kfib->header.Command == cpu_to_le32(TakeABreakPt)) {
+               /* Debug hook: just poke the adapter, no fib is queued. */
+               aac_adapter_interrupt(dev);
+               /*
+                * Since we didn't really send a fib, zero out the state to allow 
+                * cleanup code not to assert.
+                */
+               kfib->header.XferState = 0;
+       } else {
+               if (fib_send(kfib->header.Command, fibptr, le32_to_cpu(kfib->header.Size) , FsaNormal,
+                       1, 1, NULL, NULL) != 0) 
+               {
+                       fib_free(fibptr);
+                       return -EINVAL;
+               }
+               if (fib_complete(fibptr) != 0) {
+                       fib_free(fibptr);
+                       return -EINVAL;
+               }
+       }
+       /*
+        *      Make sure that the size returned by the adapter (which includes
+        *      the header) is less than or equal to the size of a fib, so we
+        *      don't corrupt application data. Then copy that size to the user
+        *      buffer. (Don't try to add the header information again, since it
+        *      was already included by the adapter.)
+        *
+        *      The adapter rewrites this Size field, so it is untrusted input
+        *      just like the user-supplied header above: the original code
+        *      documented this check but never performed it, and also used the
+        *      raw little-endian value as the copy length.
+        */
+       if (le32_to_cpu(kfib->header.Size) > sizeof(struct hw_fib)) {
+               fib_free(fibptr);
+               return -EINVAL;
+       }
+       if (copy_to_user(arg, (void *)kfib, le32_to_cpu(kfib->header.Size))) {
+               fib_free(fibptr);
+               return -EFAULT;
+       }
+       fib_free(fibptr);
+       return 0;
+}
+
+/**
+ *     open_getadapter_fib     -       create a user AIF context
+ *     @dev: adapter to attach the new context to
+ *     @arg: user pointer that receives the opaque context handle
+ *
+ *     Allocates an aac_fib_context, links it onto the adapter's fib_list
+ *     under fib_lock, and copies the kernel address of the context back
+ *     to userspace as the handle.  The handle is later validated against
+ *     fib_list by next_getadapter_fib()/close_getadapter_fib() before use.
+ *
+ *     Returns 0 on success, -ENOMEM or -EFAULT on failure.
+ */
+
+static int open_getadapter_fib(struct aac_dev * dev, void *arg)
+{
+       struct aac_fib_context * fibctx;
+       int status;
+       unsigned long flags;
+
+       fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
+       if (fibctx == NULL) {
+               status = -ENOMEM;
+       } else {
+               fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
+               fibctx->size = sizeof(struct aac_fib_context);
+#if 0
+               /*
+                *      Initialize the mutex used to wait for the next AIF.
+                *      (Disabled in this port — semaphore support is
+                *      compiled out; see the #if 0 wait path in
+                *      next_getadapter_fib.)
+                */
+               init_MUTEX_LOCKED(&fibctx->wait_sem);
+#endif
+               fibctx->wait = 0;
+               /*
+                *      Initialize the fibs and set the count of fibs on
+                *      the list to 0.
+                */
+               fibctx->count = 0;
+               INIT_LIST_HEAD(&fibctx->fibs);
+               /* Creation timestamp in seconds (jiffies/HZ). */
+               fibctx->jiffies = jiffies/HZ;
+               /*
+                *      Now add this context onto the adapter's 
+                *      AdapterFibContext list.
+                */
+               spin_lock_irqsave(&dev->fib_lock, flags);
+               list_add_tail(&fibctx->next, &dev->fib_list);
+               spin_unlock_irqrestore(&dev->fib_lock, flags);
+               /* The kernel pointer itself is the user-visible handle. */
+               if (copy_to_user(arg,  &fibctx, sizeof(struct aac_fib_context *))) {
+                       status = -EFAULT;
+               } else {
+                       status = 0;
+               }       
+       }
+       return status;
+}
+
+/**
+ *     next_getadapter_fib     -       get the next fib
+ *     @dev: adapter to use
+ *     @arg: user pointer to a struct fib_ioctl (context handle, dest
+ *           buffer, wait flag)
+ *     
+ *     This routine will get the next Fib, if available, from the AdapterFibContext
+ *     passed in from the user.  Returns 0 with the fib copied out,
+ *     -EAGAIN when none is queued and waiting was not requested, or
+ *     -EFAULT/-EINVAL on a bad user pointer or handle.
+ */
+
+static int next_getadapter_fib(struct aac_dev * dev, void *arg)
+{
+       struct fib_ioctl f;
+       struct aac_fib_context *fibctx, *aifcp;
+       struct hw_fib * fib;
+       int status;
+       struct list_head * entry;
+       int found;
+       unsigned long flags;
+       
+       if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
+               return -EFAULT;
+       /*
+        *      Extract the AdapterFibContext from the Input parameters.
+        */
+       fibctx = (struct aac_fib_context *) f.fibctx;
+
+       /*
+        *      Verify that the HANDLE passed in was a valid AdapterFibContext
+        *
+        *      Search the list of AdapterFibContext addresses on the adapter
+        *      to be sure this is a valid address
+        *
+        *      NOTE(review): this walk is done without dev->fib_lock held;
+        *      a concurrent close_getadapter_fib() could free the context
+        *      while we inspect it — confirm external serialisation of the
+        *      ioctl path.
+        */
+       found = 0;
+       entry = dev->fib_list.next;
+
+       while(entry != &dev->fib_list) {
+               aifcp = list_entry(entry, struct aac_fib_context, next);
+               if(fibctx == aifcp) {   /* We found a winner */
+                       found = 1;
+                       break;
+               }
+               entry = entry->next;
+       }
+       if (found == 0)
+               return -EINVAL;
+
+       if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+                (fibctx->size != sizeof(struct aac_fib_context)))
+               return -EINVAL;
+       status = 0;
+       spin_lock_irqsave(&dev->fib_lock, flags);
+       /*
+        *      If there are no fibs to send back, then either wait or return
+        *      -EAGAIN
+        */
+return_fib:
+       if (!list_empty(&fibctx->fibs)) {
+               struct list_head * entry;
+               /*
+                *      Pull the next fib from the fibs
+                */
+               entry = fibctx->fibs.next;
+               list_del(entry);
+               
+               fib = list_entry(entry, struct hw_fib, header.FibLinks);
+               fibctx->count--;
+               /* Drop the lock before touching userspace. */
+               spin_unlock_irqrestore(&dev->fib_lock, flags);
+               if (copy_to_user(f.fib, fib, sizeof(struct hw_fib))) {
+                       kfree(fib);
+                       return -EFAULT;
+               }       
+               /*
+                *      Free the space occupied by this copy of the fib.
+                */
+               kfree(fib);
+               status = 0;
+               fibctx->jiffies = jiffies/HZ;
+       } else {
+               spin_unlock_irqrestore(&dev->fib_lock, flags);
+               /*
+                *      With wait_sem compiled out (#if 0 below), a waiting
+                *      caller simply re-takes the lock and retries: a busy
+                *      spin until a fib arrives.
+                */
+               if (f.wait) {
+#if 0
+                       if(down_interruptible(&fibctx->wait_sem) < 0) {
+                               status = -EINTR;
+                       } else {
+#else
+                           {
+#endif
+                               /* Lock again and retry */
+                               spin_lock_irqsave(&dev->fib_lock, flags);
+                               goto return_fib;
+                       }
+               } else {
+                       status = -EAGAIN;
+               }       
+       }
+       return status;
+}
+
+/*
+ *     aac_close_fib_context - destroy a user AIF context.
+ *     @dev: adapter the context belongs to (unused here; kept for the
+ *           exported interface)
+ *     @fibctx: context to tear down
+ *
+ *     Frees every queued fib copy, unlinks the context from the
+ *     adapter's list and frees it.  Always returns 0.
+ *
+ *     Caller holds dev->fib_lock (see close_getadapter_fib).
+ *     NOTE(review): kfree() is therefore called with a spinlock held and
+ *     interrupts off — confirm that is acceptable in this environment.
+ */
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
+{
+       struct hw_fib *fib;
+
+       /*
+        *      First free any FIBs that have not been consumed.
+        */
+       while (!list_empty(&fibctx->fibs)) {
+               struct list_head * entry;
+               /*
+                *      Pull the next fib from the fibs
+                */
+               entry = fibctx->fibs.next;
+               list_del(entry);
+               fib = list_entry(entry, struct hw_fib, header.FibLinks);
+               fibctx->count--;
+               /*
+                *      Free the space occupied by this copy of the fib.
+                */
+               kfree(fib);
+       }
+       /*
+        *      Remove the Context from the AdapterFibContext List
+        */
+       list_del(&fibctx->next);
+       /*
+        *      Invalidate context
+        */
+       fibctx->type = 0;
+       /*
+        *      Free the space occupied by the Context
+        */
+       kfree(fibctx);
+       return 0;
+}
+
+/**
+ *     close_getadapter_fib    -       close down user fib context
+ *     @dev: adapter
+ *     @arg: the context handle (kernel pointer previously handed out by
+ *           open_getadapter_fib)
+ *
+ *     This routine will close down the fibctx passed in from the user.
+ *     Returns 0 if the handle was valid (or already gone), -EINVAL if
+ *     the object it names is not a live context.
+ */
+static int close_getadapter_fib(struct aac_dev * dev, void *arg)
+{
+       struct aac_fib_context *fibctx, *aifcp;
+       int status;
+       unsigned long flags;
+       struct list_head * entry;
+       int found;
+
+       /*
+        *      Extract the fibctx from the input parameters
+        */
+       fibctx = arg;
+
+       /*
+        *      Verify that the HANDLE passed in was a valid AdapterFibContext
+        *
+        *      Search the list of AdapterFibContext addresses on the adapter
+        *      to be sure this is a valid address
+        *
+        *      NOTE(review): like next_getadapter_fib, the walk happens
+        *      without dev->fib_lock held — confirm the ioctl path is
+        *      serialised externally.
+        */
+
+       found = 0;
+       entry = dev->fib_list.next;
+
+       while(entry != &dev->fib_list) {
+               aifcp = list_entry(entry, struct aac_fib_context, next);
+               if(fibctx == aifcp) {   /* We found a winner */
+                       found = 1;
+                       break;
+               }
+               entry = entry->next;
+       }
+
+       if(found == 0)
+               return 0; /* Already gone */
+
+       if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+                (fibctx->size != sizeof(struct aac_fib_context)))
+               return -EINVAL;
+       /* aac_close_fib_context() expects fib_lock to be held. */
+       spin_lock_irqsave(&dev->fib_lock, flags);
+       status = aac_close_fib_context(dev, fibctx);
+       spin_unlock_irqrestore(&dev->fib_lock, flags);
+       return status;
+}
+
+/**
+ *     check_revision  -       report firmware revision to userspace
+ *     @dev: adapter
+ *     @arg: user pointer receiving a struct revision
+ *
+ *     This routine returns the firmware version.
+ *      Under Linux, there have been no version incompatibilities, so this is simple!
+ *     (compat is hard-wired to 1.)  Returns 0 or -EFAULT.
+ */
+
+static int check_revision(struct aac_dev *dev, void *arg)
+{
+       struct revision response;
+
+       response.compat = 1;
+       response.version = dev->adapter_info.kernelrev;
+       response.build = dev->adapter_info.kernelbuild;
+
+       if (copy_to_user(arg, &response, sizeof(response)))
+               return -EFAULT;
+       return 0;
+}
+
+
+/* Wire format returned by FSACTL_GET_PCI_INFO (see aac_get_pci_info). */
+struct aac_pci_info {
+        u32 bus;
+        u32 slot;
+};
+
+
+/**
+ *     aac_get_pci_info        -       report PCI location to userspace
+ *     @dev: adapter being queried
+ *     @arg: user buffer receiving a struct aac_pci_info
+ *
+ *     Copies the adapter's PCI bus number and slot out to the caller.
+ *     Returns 0 on success or -EFAULT if the user copy fails.
+ */
+int aac_get_pci_info(struct aac_dev* dev, void* arg)
+{
+       struct aac_pci_info info;
+
+       info.bus = dev->pdev->bus->number;
+       info.slot = PCI_SLOT(dev->pdev->devfn);
+
+       return copy_to_user(arg, (void*)&info, sizeof(struct aac_pci_info))
+               ? -EFAULT : 0;
+}
+
+/*
+ *     aac_do_ioctl - top-level ioctl dispatcher for the driver.
+ *     @dev: adapter the ioctl is aimed at
+ *     @cmd: FSACTL_* command code
+ *     @arg: command-specific user pointer
+ *
+ *     Offers the command to the HBA-specific handler first; only if that
+ *     returns -ENOTTY is it matched against the generic FSACTL commands.
+ *     Unknown commands yield -ENOTTY.
+ */
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void *arg)
+{
+       int status;
+       
+       /*
+        *      HBA gets first crack
+        */
+        
+       status = aac_dev_ioctl(dev, cmd, arg);
+       if(status != -ENOTTY)
+               return status;
+
+       switch (cmd) {
+       case FSACTL_MINIPORT_REV_CHECK:
+               status = check_revision(dev, arg);
+               break;
+       case FSACTL_SENDFIB:
+               status = ioctl_send_fib(dev, arg);
+               break;
+       case FSACTL_OPEN_GET_ADAPTER_FIB:
+               status = open_getadapter_fib(dev, arg);
+               break;
+       case FSACTL_GET_NEXT_ADAPTER_FIB:
+               status = next_getadapter_fib(dev, arg);
+               break;
+       case FSACTL_CLOSE_GET_ADAPTER_FIB:
+               status = close_getadapter_fib(dev, arg);
+               break;
+       case FSACTL_GET_PCI_INFO:
+               status = aac_get_pci_info(dev,arg);
+               break;
+       default:
+               status = -ENOTTY;
+               break;  
+       }
+       return status;
+}
+
diff --git a/xen-2.4.16/drivers/scsi/aacraid/comminit.c b/xen-2.4.16/drivers/scsi/aacraid/comminit.c
new file mode 100644 (file)
index 0000000..29a3dba
--- /dev/null
@@ -0,0 +1,350 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  comminit.c
+ *
+ * Abstract: This supports the initialization of the host adapter communication interface.
+ *    This is a platform dependent module for the pci cyclone board.
+ *
+ */
+
+#include <xeno/config.h>
+/* #include <xeno/kernel.h> */
+#include <xeno/init.h>
+#include <xeno/types.h>
+#include <xeno/sched.h>
+#include <xeno/pci.h>
+#include <xeno/spinlock.h>
+/* #include <xeno/slab.h> */
+#include <xeno/blk.h>
+/* #include <xeno/completion.h> */
+/* #include <asm/semaphore.h> */
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+/* Driver-global configuration/statistics shared by all adapters. */
+struct aac_common aac_config;
+
+/* Head of the singly linked list of registered adapters, chained through
+ * aac_dev->next (maintained by aac_init_adapter / aac_detach). */
+static struct aac_dev *devices;
+
+/*
+ *     aac_alloc_comm - allocate the shared host/adapter memory region.
+ *     @dev: adapter
+ *     @commaddr: out — kernel virtual address of the comm-queue area
+ *     @commsize: bytes needed for the comm-queue headers and entries
+ *     @commalign: required alignment of the comm area
+ *
+ *     Carves a single PCI-consistent allocation into, in order: the
+ *     adapter FIB area (4096 bytes), the aac_init structure, alignment
+ *     padding, the comm area, and the adapter printf buffer; the
+ *     matching physical addresses are written into *dev->init for the
+ *     firmware.  Returns 1 on success, 0 on allocation failure.
+ */
+static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
+{
+       unsigned char *base;
+       unsigned long size, align;
+       unsigned long fibsize = 4096;
+       unsigned long printfbufsiz = 256;
+       struct aac_init *init;
+       dma_addr_t phys;
+
+       /* FIXME: Adaptec add 128 bytes to this value - WHY ?? */
+       size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
+
+       base = pci_alloc_consistent(dev->pdev, size, &phys);
+       if(base == NULL)
+       {
+               printk(KERN_ERR "aacraid: unable to create mapping.\n");
+               return 0;
+       }
+       dev->comm_addr = (void *)base;
+       dev->comm_phys = phys;
+       dev->comm_size = size;
+
+       dev->init = (struct aac_init *)(base + fibsize);
+       dev->init_pa = phys + fibsize;
+
+       /*
+        *      Cache the upper bits of the virtual mapping for 64bit boxes
+        *      FIXME: this crap should be rewritten
+        */
+#if BITS_PER_LONG >= 64 
+       dev->fib_base_va = ((ulong)base & 0xffffffff00000000);
+#endif
+
+       init = dev->init;
+
+       init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
+       init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
+       init->fsrev = cpu_to_le32(dev->fsrev);
+
+       /*
+        *      Adapter Fibs are the first thing allocated so that they
+        *      start page aligned
+        */
+       init->AdapterFibsVirtualAddress = cpu_to_le32((u32)base);
+       init->AdapterFibsPhysicalAddress = cpu_to_le32(phys);
+       init->AdapterFibsSize = cpu_to_le32(fibsize);
+       init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
+
+       /*
+        * Increment the base address by the amount already used
+        */
+       base = base + fibsize + sizeof(struct aac_init);
+       phys = phys + fibsize + sizeof(struct aac_init);
+       /*
+        *      Align the beginning of Headers to commalign
+        *      (commalign is assumed to be a power of two here).
+        */
+       align = (commalign - ((unsigned long)(base) & (commalign - 1)));
+       base = base + align;
+       phys = phys + align;
+       /*
+        *      Fill in addresses of the Comm Area Headers and Queues
+        */
+       *commaddr = (unsigned long *)base;
+       init->CommHeaderAddress = cpu_to_le32(phys);
+       /*
+        *      Increment the base address by the size of the CommArea
+        */
+       base = base + commsize;
+       phys = phys + commsize;
+       /*
+        *       Place the Printf buffer area after the Fast I/O comm area.
+        */
+       dev->printfbuf = (void *)base;
+       init->printfbuf = cpu_to_le32(phys);
+       init->printfbufsiz = cpu_to_le32(printfbufsiz);
+       memset(base, 0, printfbufsiz);
+       return 1;
+}
+    
+/*
+ *     aac_queue_init - initialise one host/adapter comm queue.
+ *     @dev: owning adapter
+ *     @q: queue to set up
+ *     @mem: two consecutive u32 slots in the shared comm area holding
+ *           the producer (mem[0]) and consumer (mem[1]) indices
+ *     @qsize: number of entries in the queue
+ *
+ *     Producer and consumer both start at qsize, i.e. the queue begins
+ *     empty.  Each queue carries its own spinlock; aac_comm_init()
+ *     later aliases the locks of paired cmd/resp queues.
+ */
+static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
+{
+       q->numpending = 0;
+       q->dev = dev;
+       INIT_LIST_HEAD(&q->pendingq);
+#if 0
+       init_waitqueue_head(&q->cmdready);
+#endif
+       INIT_LIST_HEAD(&q->cmdq);
+#if 0
+       init_waitqueue_head(&q->qfull);
+#endif
+       spin_lock_init(&q->lockdata);
+       q->lock = &q->lockdata;
+       q->headers.producer = mem;
+       q->headers.consumer = mem+1;
+       *q->headers.producer = cpu_to_le32(qsize);
+       *q->headers.consumer = cpu_to_le32(qsize);
+       q->entries = qsize;
+}
+
+/**
+ *     aac_send_shutdown               -       shutdown an adapter
+ *     @dev: Adapter to shutdown
+ *
+ *     This routine will send a VM_CloseAll (shutdown) request to the adapter.
+ *     Returns the fib_send() status (0 on success).
+ */
+
+static int aac_send_shutdown(struct aac_dev * dev)
+{
+       struct fib * fibctx;
+       struct aac_close *cmd;
+       int status;
+
+       /* fib_alloc() BUG()s rather than returning NULL, so no check. */
+       fibctx = fib_alloc(dev);
+       fib_init(fibctx);
+
+       cmd = (struct aac_close *) fib_data(fibctx);
+
+       cmd->command = cpu_to_le32(VM_CloseAll);
+       /* cid 0xffffffff == all containers. */
+       cmd->cid = cpu_to_le32(0xffffffff);
+
+       status = fib_send(ContainerCommand,
+                         fibctx,
+                         sizeof(struct aac_close),
+                         FsaNormal,
+                         1, 1,
+                         NULL, NULL);
+
+       /* Only a successfully sent fib needs completing. */
+       if (status == 0)
+               fib_complete(fibctx);
+       fib_free(fibctx);
+       return status;
+}
+
+/**
+ *     aac_detach      -       detach adapter
+ *     @detach: adapter to disconnect
+ *
+ *     Disconnect and shutdown an AAC based adapter, freeing resources
+ *     as we go.  Unlinks @detach from the global device list via a
+ *     pointer-to-pointer walk; BUG()s if the adapter is not on the list.
+ *     Returns 1 on success.
+ */
+
+int aac_detach(struct aac_dev *detach)
+{
+       struct aac_dev **dev = &devices;
+       
+       while(*dev)
+       {
+               if(*dev == detach)
+               {
+                       /* Unlink first, then tear down. */
+                       *dev = detach->next;
+                       aac_send_shutdown(detach);
+                       fib_map_free(detach);
+                       pci_free_consistent(detach->pdev, detach->comm_size, detach->comm_addr, detach->comm_phys);
+                       kfree(detach->queues);
+                       return 1;
+               }
+               dev=&((*dev)->next);
+       }
+       BUG();
+       return 0;
+}
+
+/**
+ *     aac_comm_init   -       Initialise FSA data structures
+ *     @dev:   Adapter to intialise
+ *
+ *     Initializes the data structures that are required for the FSA commuication
+ *     interface to operate. 
+ *     Returns
+ *             0 - if we were able to init the communication interface.
+ *             -ENOMEM - if the shared region could not be allocated.
+ */
+int aac_comm_init(struct aac_dev * dev)
+{
+       /* Two u32 index words (producer + consumer) per queue. */
+       unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
+       unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
+       u32 *headers;
+       struct aac_entry * queues;
+       unsigned long size;
+       struct aac_queue_block * comm = dev->queues;
+
+       /*
+        *      Now allocate and initialize the zone structures used as our 
+        *      pool of FIB context records.  The size of the zone is based
+        *      on the system memory size.  We also initialize the mutex used
+        *      to protect the zone.
+        */
+       spin_lock_init(&dev->fib_lock);
+
+       /*
+        *      Allocate the physically contigous space for the commuication
+        *      queue headers. 
+        */
+
+       size = hdrsize + queuesize;
+
+       if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
+               return -ENOMEM;
+
+       /* Index words come first; the entry arrays follow them. */
+       queues = (struct aac_entry *)((unsigned char *)headers + hdrsize);
+
+       /* Adapter to Host normal priority Command queue */ 
+       comm->queue[HostNormCmdQueue].base = queues;
+       aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
+       queues += HOST_NORM_CMD_ENTRIES;
+       headers += 2;
+
+       /* Adapter to Host high priority command queue */
+       comm->queue[HostHighCmdQueue].base = queues;
+       aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
+    
+       queues += HOST_HIGH_CMD_ENTRIES;
+       headers +=2;
+
+       /* Host to adapter normal priority command queue */
+       comm->queue[AdapNormCmdQueue].base = queues;
+       aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
+    
+       queues += ADAP_NORM_CMD_ENTRIES;
+       headers += 2;
+
+       /* host to adapter high priority command queue */
+       comm->queue[AdapHighCmdQueue].base = queues;
+       aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
+    
+       queues += ADAP_HIGH_CMD_ENTRIES;
+       headers += 2;
+
+       /* adapter to host normal priority response queue */
+       comm->queue[HostNormRespQueue].base = queues;
+       aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
+    
+       queues += HOST_NORM_RESP_ENTRIES;
+       headers += 2;
+
+       /* adapter to host high priority response queue */
+       comm->queue[HostHighRespQueue].base = queues;
+       aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
+   
+       queues += HOST_HIGH_RESP_ENTRIES;
+       headers += 2;
+
+       /* host to adapter normal priority response queue */
+       comm->queue[AdapNormRespQueue].base = queues;
+       aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
+
+       queues += ADAP_NORM_RESP_ENTRIES;
+       headers += 2;
+       
+       /* host to adapter high priority response queue */ 
+       comm->queue[AdapHighRespQueue].base = queues;
+       aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
+
+       /*
+        *      Each adapter-bound cmd queue shares a lock with the host-bound
+        *      response queue it is paired with.
+        */
+       comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
+       comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
+       comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
+       comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
+
+       return 0;
+}
+
+struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+{
+       /*
+        *      Ok now init the communication subsystem
+        */
+       dev->queues = (struct aac_queue_block *) 
+           kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
+       if (dev->queues == NULL) {
+               printk(KERN_ERR "Error could not allocate comm region.\n");
+               return NULL;
+       }
+       memset(dev->queues, 0, sizeof(struct aac_queue_block));
+
+       printk("aac_init_adapater, dev is %p\n", dev); 
+       if (aac_comm_init(dev)<0)
+               return NULL;
+       printk("aac_init_adapater, dev->init is %p\n", dev->init); 
+       /*
+        *      Initialize the list of fibs
+        */
+       if(fib_setup(dev)<0)
+           return NULL;
+               
+       INIT_LIST_HEAD(&dev->fib_list);
+#if 0
+       init_completion(&dev->aif_completion);
+#endif
+       /*
+        *      Add this adapter in to our dev List.
+        */
+       dev->next = devices;
+       devices = dev;
+       return dev;
+}
+
+    
diff --git a/xen-2.4.16/drivers/scsi/aacraid/commsup.c b/xen-2.4.16/drivers/scsi/aacraid/commsup.c
new file mode 100644 (file)
index 0000000..a310fe4
--- /dev/null
@@ -0,0 +1,1022 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  commsup.c
+ *
+ * Abstract: Contains all routines that are required for FSA host/adapter
+ *    communication.
+ *
+ *
+ */
+
+#include <xeno/config.h>
+/* #include <xeno/kernel.h> */
+#include <xeno/init.h>
+#include <xeno/types.h>
+#include <xeno/sched.h>
+#include <xeno/pci.h>
+#include <xeno/spinlock.h>
+
+#include <xeno/interrupt.h> // for softirq stuff 
+
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/completion.h> */
+/*  #include <asm/semaphore.h> */
+#include <xeno/blk.h>
+#include <xeno/delay.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+/**
+ *     fib_map_alloc           -       allocate the fib objects
+ *     @dev: Adapter to allocate for
+ *
+ *     Allocate and map the shared PCI space for the FIB blocks used to
+ *     talk to the Adaptec firmware.  Fills in dev->hw_fib_va and
+ *     dev->hw_fib_pa; returns 0 on success, -ENOMEM on failure.
+ */
+static int fib_map_alloc(struct aac_dev *dev)
+{
+    dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
+                                         sizeof(struct hw_fib) * AAC_NUM_FIB,
+                                         &dev->hw_fib_pa);
+    if (dev->hw_fib_va == NULL)
+       return -ENOMEM;
+    return 0;
+}
+
+/**
+ *     fib_map_free            -       free the fib objects
+ *     @dev: Adapter to free
+ *
+ *     Release the PCI-consistent region that fib_map_alloc() obtained
+ *     for this adapter's FIB blocks.
+ */
+
+void fib_map_free(struct aac_dev *dev)
+{
+    size_t bytes = sizeof(struct hw_fib) * AAC_NUM_FIB;
+
+    pci_free_consistent(dev->pdev, bytes, dev->hw_fib_va, dev->hw_fib_pa);
+}
+
+/**
+ *     fib_setup       -       setup the fibs
+ *     @dev: Adapter to set up
+ *
+ *     Allocate the PCI space for the fibs, map it and then intialise the
+ *     fib area, the unmapped fib data and also the free list.
+ *     Returns 0 on success, -ENOMEM if the shared area could not be
+ *     allocated.
+ */
+
+int fib_setup(struct aac_dev * dev)
+{
+    struct fib *fibptr;
+    struct hw_fib *fib;
+    dma_addr_t fibpa;
+    int i;
+    
+    if(fib_map_alloc(dev)<0)
+       return -ENOMEM;
+    
+    fib = dev->hw_fib_va;
+    fibpa = dev->hw_fib_pa;
+    memset(fib, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
+    /*
+     * Initialise the fibs: each dev->fibs[i] wraps one hw_fib in the
+     * shared area, recording both its virtual and DMA ("logical")
+     * addresses.
+     */
+    for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) 
+    {
+       fibptr->dev = dev;
+       fibptr->fib = fib;
+       fibptr->data = (void *) fibptr->fib->data;
+       fibptr->next = fibptr+1;        /* Forward chain the fibs */
+#if 0
+       init_MUTEX_LOCKED(&fibptr->event_wait);
+#endif
+       spin_lock_init(&fibptr->event_lock);
+       fib->header.XferState = cpu_to_le32(0xffffffff);
+       fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
+       /* DMA address of this hw_fib, handed to the adapter later. */
+       fibptr->logicaladdr = (unsigned long) fibpa;
+       fib = (struct hw_fib *)((unsigned char *)fib + sizeof(struct hw_fib));
+       fibpa = fibpa + sizeof(struct hw_fib);
+    }
+    /*
+     * Add the fib chain to the free list
+     */
+    dev->fibs[AAC_NUM_FIB-1].next = NULL;
+    /*
+     * Enable this to debug out of queue space
+     */
+    dev->free_fib = &dev->fibs[0];
+    return 0;
+}
+
+/**
+ *     fib_alloc       -       allocate a fib
+ *     @dev: Adapter to allocate the fib for
+ *
+ *     Allocate a fib from the adapter fib pool.  Note: despite the old
+ *     comment, this does NOT wait — an empty pool is treated as a fatal
+ *     driver bug (BUG()), so callers always receive a valid fib.
+ */
+struct fib * fib_alloc(struct aac_dev *dev)
+{
+    struct fib * fibptr;
+    unsigned long flags;
+    
+    /* The free list is protected by the adapter-wide fib_lock. */
+    spin_lock_irqsave(&dev->fib_lock, flags);
+    fibptr = dev->free_fib;    
+    if(!fibptr)
+       BUG();
+    dev->free_fib = fibptr->next;
+    spin_unlock_irqrestore(&dev->fib_lock, flags);
+    /*
+     * Set the proper node type code and node byte size
+     */
+    fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+    fibptr->size = sizeof(struct fib);
+    /*
+     * Null out fields that depend on being zero at the start of
+     * each I/O
+     */
+    fibptr->fib->header.XferState = cpu_to_le32(0);
+    fibptr->callback = NULL;
+    fibptr->callback_data = NULL;
+    
+    return fibptr;
+}
+
+/**
+ *     fib_free        -       free a fib
+ *     @fibptr: fib to free up
+ *
+ *     Frees up a fib and places it on the appropriate queue: timed-out
+ *     fibs go on the adapter's timeout list (the hardware may still own
+ *     them), everything else returns to the free list.
+ */
+void fib_free(struct fib * fibptr)
+{
+    unsigned long flags;
+    
+    spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
+    
+    if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
+       aac_config.fib_timeouts++;
+       fibptr->next = fibptr->dev->timeout_fib;
+       fibptr->dev->timeout_fib = fibptr;
+    } else {
+       /* A non-zero XferState means the fib was never completed. */
+       if (fibptr->fib->header.XferState != 0) {
+           printk(KERN_WARNING "fib_free, XferState != 0, "
+                  "fibptr = 0x%p, XferState = 0x%x\n", 
+                  (void *)fibptr, fibptr->fib->header.XferState);
+       }
+       fibptr->next = fibptr->dev->free_fib;
+       fibptr->dev->free_fib = fibptr;
+    }  
+    spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
+}
+
+/**
+ *     fib_init        -       initialise a fib
+ *     @fibptr: The fib to initialize
+ *
+ *     Stamp the hardware fib header with the magic value, its size, a
+ *     host-owned transfer state and zeroed sender/receiver addresses,
+ *     ready for a fresh request.
+ */
+void fib_init(struct fib *fibptr)
+{
+    struct hw_fib *hw = fibptr->fib;
+
+    hw->header.StructType         = FIB_MAGIC;
+    hw->header.Size               = cpu_to_le16(sizeof(struct hw_fib));
+    hw->header.XferState          = cpu_to_le32(HostOwned | FibInitialized |
+                                               FibEmpty | FastResponseCapable);
+    hw->header.SenderFibAddress   = cpu_to_le32(0);
+    hw->header.ReceiverFibAddress = cpu_to_le32(0);
+    hw->header.SenderSize         = cpu_to_le16(sizeof(struct hw_fib));
+}
+
+/**
+ *     fib_dealloc             -       deallocate a fib
+ *     @fibptr: fib to deallocate
+ *
+ *     Marks the hardware fib idle so it can be reused. (Despite the
+ *     wording, the struct fib itself is returned to the pool by
+ *     fib_free(), not here.)
+ */
+void fib_dealloc(struct fib * fibptr)
+{
+    struct hw_fib *fib = fibptr->fib;
+    /* A missing magic means this was never a live fib - fatal. */
+    if(fib->header.StructType != FIB_MAGIC) 
+       BUG();
+    /* XferState == 0 is the "idle" marker checked by fib_complete(). */
+    fib->header.XferState = cpu_to_le32(0);        
+}
+
+/*
+ *     Communication primitives define and support the queuing method we use
+ *     to support host to adapter communication. All queue accesses happen
+ *     through these routines, which are the only routines with knowledge of
+ *     how these queues are implemented.
+ */
+/**
+ *     aac_get_entry           -       get a queue entry
+ *     @dev: Adapter
+ *     @qid: Queue Number
+ *     @entry: Entry return
+ *     @index: Index return
+ *     @nonotify: notification control
+ *
+ *     Returns a queue entry if the queue has free entries. If the queue
+ *     is full (no free entries) then no entry is returned and the function
+ *     returns 0, otherwise 1 is returned.
+ */
+static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
+{
+    struct aac_queue * q;
+
+    /*
+     * All of the queues wrap when they reach the end, so we check
+     * to see if they have reached the end and if they have we just
+     * set the index back to zero. This is a wrap. You could or off
+     * the high bits in all updates but this is a bit faster I think.
+     */
+
+    q = &dev->queues->queue[qid];
+       
+    /*
+     * If producer is two short of consumer, ask the caller to suppress
+     * the adapter notification for this entry.
+     * NOTE(review): this subtraction happens before the wrap check below,
+     * so the heuristic may misfire near the wrap point - confirm intent.
+     */
+    *index = le32_to_cpu(*(q->headers.producer));
+    if (*index - 2 == le32_to_cpu(*(q->headers.consumer)))
+       *nonotify = 1; 
+
+    /* Wrap the producer index at the capacity of the selected queue. */
+    if (qid == AdapHighCmdQueue) {
+       if (*index >= ADAP_HIGH_CMD_ENTRIES)
+           *index = 0;
+    } else if (qid == AdapNormCmdQueue) {
+       if (*index >= ADAP_NORM_CMD_ENTRIES) 
+           *index = 0; /* Wrap to front of the Producer Queue. */
+    }
+    else if (qid == AdapHighRespQueue) 
+    {
+       if (*index >= ADAP_HIGH_RESP_ENTRIES)
+           *index = 0;
+    }
+    else if (qid == AdapNormRespQueue) 
+    {
+       if (*index >= ADAP_NORM_RESP_ENTRIES) 
+           *index = 0; /* Wrap to front of the Producer Queue. */
+    }
+    else BUG();
+
+    /* Producer catching the consumer means the ring is full. */
+    if (*index + 1 == le32_to_cpu(*(q->headers.consumer))) { /* Queue full */
+       printk(KERN_WARNING "Queue %d full, %ld outstanding.\n", 
+              qid, q->numpending);
+       return 0;
+    } else {
+       *entry = q->base + *index;
+       return 1;
+    }
+}   
+
+/**
+ *     aac_queue_get           -       get the next free QE
+ *     @dev: Adapter
+ *     @index: Returned index
+ *     @qid: Queue number
+ *     @fib: Fib to associate with the queue entry
+ *     @wait: Wait if queue full (NOTE(review): currently ignored; the
+ *            loops below spin until an entry becomes free)
+ *     @fibptr: Driver fib object to go with fib
+ *     @nonotify: Don't notify the adapter
+ *
+ *     Gets the next free QE off the requested priorty adapter command
+ *     queue and associates the Fib with the QE. The QE represented by
+ *     index is ready to insert on the queue when this routine returns
+ *     success.
+ *
+ *     The queue lock is acquired here and released later by
+ *     aac_insert_entry(); the saved irq flags live in q->SavedIrql in
+ *     between, so only one caller may be between the two per queue.
+ */
+
+static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * fib, int wait, struct fib * fibptr, unsigned long *nonotify)
+{
+    struct aac_entry * entry = NULL;
+    int map = 0;
+    struct aac_queue * q = &dev->queues->queue[qid];
+               
+    /* Released in aac_insert_entry(); flags stashed in the queue struct. */
+    spin_lock_irqsave(q->lock, q->SavedIrql);
+           
+    if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) 
+    {
+       /*  if no entries wait for some if caller wants to */
+       while (!aac_get_entry(dev, qid, &entry, index, nonotify)) 
+       {
+           /* NOTE(review): busy-spins with the lock held, logging each
+            * failed attempt - the @wait parameter is never consulted. */
+           printk(KERN_ERR "GetEntries failed\n");
+       }
+       /*
+        *      Setup queue entry with a command, status and fib mapped
+        */
+       entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
+       map = 1;
+    }
+    else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
+    {
+       while(!aac_get_entry(dev, qid, &entry, index, nonotify)) 
+       {
+           /* if no entries wait for some if caller wants to */
+       }
+       /*
+        *      Setup queue entry with command, status and fib mapped
+        */
+       entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
+       entry->addr = cpu_to_le32(fib->header.SenderFibAddress);                /* Restore adapters pointer to the FIB */
+       fib->header.ReceiverFibAddress = fib->header.SenderFibAddress;          /* Let the adapter now where to find its data */
+       map = 0;
+    } 
+    /*
+     * If MapFib is true than we need to map the Fib and put pointers
+     * in the queue entry.
+     */
+    if (map)
+       entry->addr = cpu_to_le32((unsigned long)(fibptr->logicaladdr));
+    return 0;
+}
+
+
+/**
+ *     aac_insert_entry        -       insert a queue entry
+ *     @dev: Adapter
+ *     @index: Index of entry to insert
+ *     @qid: Queue number
+ *     @nonotify: Suppress adapter notification
+ *
+ *     Publishes the queue entry at @index by advancing the producer
+ *     index, drops the queue lock taken by aac_queue_get(), and unless
+ *     @nonotify is set notifies the adapter that new work is pending.
+ *
+ *     Returns 0.
+ */
+static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify) 
+{
+    struct aac_queue * q = &dev->queues->queue[qid];
+
+    /* (The old "q == NULL" BUG() check was dead code: the address of an
+     * array element can never be NULL.) */
+    *(q->headers.producer) = cpu_to_le32(index + 1);
+    /* Pairs with the spin_lock_irqsave() in aac_queue_get(). */
+    spin_unlock_irqrestore(q->lock, q->SavedIrql);
+
+    if (qid == AdapHighCmdQueue ||
+       qid == AdapNormCmdQueue ||
+       qid == AdapHighRespQueue ||
+       qid == AdapNormRespQueue)
+    {
+       if (!nonotify)
+           aac_adapter_notify(dev, qid);
+    }
+    else
+       printk(KERN_WARNING "Surprise insert!\n");
+    return 0;
+}
+
+/*
+ *     Define the highest level of host to adapter communication routines. 
+ *     These routines will support host to adapter FS communication. These 
+ *     routines have no knowledge of the communication method used. This level
+ *     sends and receives FIBs. This level has no knowledge of how these FIBs
+ *     get passed back and forth.
+ */
+
+/**
+ *     fib_send        -       send a fib to the adapter
+ *     @command: Command to send
+ *     @fibptr: The fib
+ *     @size: Size of fib data area
+ *     @priority: Priority of Fib
+ *     @wait: Async/sync select
+ *     @reply: True if a reply is wanted
+ *     @callback: Called with reply
+ *     @callback_data: Passed to callback
+ *
+ *     Sends the requested FIB to the adapter and optionally will wait for a
+ *     response FIB. If the caller does not wish to wait for a response than
+ *     an event to wait on must be supplied. This event will be set when a
+ *     response FIB is received from the adapter.
+ */
+int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority, int wait, int reply, fib_callback callback, void * callback_data)
+{
+    u32 index;
+    u32 qid;
+    struct aac_dev * dev = fibptr->dev;
+    unsigned long nointr = 0;
+    struct hw_fib * fib = fibptr->fib;
+    struct aac_queue * q;
+    unsigned long flags = 0;
+
+    if (!(le32_to_cpu(fib->header.XferState) & HostOwned))
+       return -EBUSY;
+    /*
+     * There are 5 cases with the wait and reponse requested flags. 
+     * The only invalid cases are if the caller requests to wait and
+     * does not request a response and if the caller does not want a
+     * response and the Fibis not allocated from pool. If a response
+     * is not requesed the Fib will just be deallocaed by the DPC
+     * routine when the response comes back from the adapter. No
+     * further processing will be done besides deleting the Fib. We 
+     * will have a debug mode where the adapter can notify the host
+     * it had a problem and the host can log that fact.
+     */
+    if (wait && !reply) {
+       return -EINVAL;
+    } else if (!wait && reply) {
+       fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
+       FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
+    } else if (!wait && !reply) {
+       fib->header.XferState |= cpu_to_le32(NoResponseExpected);
+       FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
+    } else if (wait && reply) {
+       fib->header.XferState |= cpu_to_le32(ResponseExpected);
+       FIB_COUNTER_INCREMENT(aac_config.NormalSent);
+    } 
+    /*
+     * Map the fib into 32bits by using the fib number
+     */
+    fib->header.SenderData = fibptr-&dev->fibs[0];     /* for callback */
+    /*
+     * Set FIB state to indicate where it came from and if we want a
+     * response from the adapter. Also load the command from the
+     * caller.
+     *
+     * Map the hw fib pointer as a 32bit value
+     */
+    fib->header.SenderFibAddress = fib2addr(fib);
+    fib->header.Command = cpu_to_le16(command);
+    fib->header.XferState |= cpu_to_le32(SentFromHost);
+    fibptr->fib->header.Flags = 0; /* Zero flags field - its internal only */
+    /*
+     * Set the size of the Fib we want to send to the adapter
+     */
+    fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
+    if (le16_to_cpu(fib->header.Size) > le16_to_cpu(fib->header.SenderSize)) {
+       return -EMSGSIZE;
+    }                
+    /*
+     * Get a queue entry connect the FIB to it and send an notify
+     * the adapter a command is ready.
+     */
+    if (priority == FsaHigh) {
+       fib->header.XferState |= cpu_to_le32(HighPriority);
+       qid = AdapHighCmdQueue;
+    } else {
+       fib->header.XferState |= cpu_to_le32(NormalPriority);
+       qid = AdapNormCmdQueue;
+    }
+    q = &dev->queues->queue[qid];
+
+    if(wait)
+       spin_lock_irqsave(&fibptr->event_lock, flags);
+
+    if(aac_queue_get( dev, &index, qid, fib, 1, fibptr, &nointr)<0)
+       return -EWOULDBLOCK;
+    dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",
+            index));
+    dprintk((KERN_DEBUG "Fib contents:.\n"));
+    dprintk((KERN_DEBUG "  Command =               %d.\n", 
+            fib->header.Command));
+    dprintk((KERN_DEBUG "  XferState  =            %x.\n", 
+            fib->header.XferState));
+    /*
+     * Fill in the Callback and CallbackContext if we are not
+     * going to wait.
+     */
+    if (!wait) {
+       fibptr->callback = callback;
+       fibptr->callback_data = callback_data;
+    }
+    FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+    list_add_tail(&fibptr->queue, &q->pendingq);
+    q->numpending++;
+
+    fibptr->done = 0;
+
+    if(aac_insert_entry(dev, index, qid, 
+                       (nointr & aac_config.irq_mod)) < 0)
+       return -EWOULDBLOCK;
+    /*
+     * If the caller wanted us to wait for response wait now. 
+     */
+    
+    if (wait) {
+       spin_unlock_irqrestore(&fibptr->event_lock, flags);
+#if 0
+       down(&fibptr->event_wait);
+#endif
+#ifdef TRY_SOFTIRQ
+       printk("about to softirq aac_command_thread...\n"); 
+       while (!fibptr->done) { 
+           raise_softirq(SCSI_LOW_SOFTIRQ); 
+           mdelay(100); 
+       }
+       printk("back from softirq cmd thread and fibptr->done!\n"); 
+#else 
+       printk("about to bail at aac_command_thread...\n"); 
+       while (!fibptr->done) { 
+           mdelay(100); 
+           aac_command_thread(dev); 
+       }
+       printk("back from command thread and fibptr->done!\n"); 
+#endif
+/*  if(fibptr->done == 0) */
+/*                     BUG(); */
+                       
+       if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+           return -ETIMEDOUT;
+       else
+           return 0;
+    }
+    /*
+     * If the user does not want a response than return success otherwise
+     * return pending
+     */
+    if (reply)
+       return -EINPROGRESS;
+    else
+       return 0;
+}
+
+/** 
+ *     aac_consumer_get        -       get the top of the queue
+ *     @dev: Adapter
+ *     @q: Queue
+ *     @entry: Return entry
+ *
+ *     Returns, through @entry, a pointer to the entry on the top of the
+ *     queue requested that we are a consumer of.  It does not change the
+ *     state of the queue.  Returns 1 if an entry was available, 0 if the
+ *     queue was empty.
+ */
+
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
+{
+    u32 index;
+    int status;
+
+    /* Raw (little-endian) equality is byte-order safe for emptiness. */
+    if (*q->headers.producer == *q->headers.consumer) {
+       status = 0;
+    } else {
+       /*
+        *      The consumer index must be wrapped if we have reached
+        *      the end of the queue, else we just use the entry
+        *      pointed to by the header index
+        */
+       if (le32_to_cpu(*q->headers.consumer) >= q->entries) 
+           index = 0;          
+       else
+           index = le32_to_cpu(*q->headers.consumer);
+       *entry = q->base + index;
+       status = 1;
+    }
+    return(status);
+}
+
+/**
+ *     aac_consumer_avail      -       check for pending queue entries
+ *     @dev: Adapter (unused; kept for interface symmetry)
+ *     @q: Queue to examine
+ *
+ *     Returns non-zero when the producer and consumer indices differ,
+ *     i.e. at least one entry is waiting to be consumed.
+ */
+int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
+{
+    u32 prod = *q->headers.producer;
+    u32 cons = *q->headers.consumer;
+
+    return prod != cons;
+}
+
+
+/**
+ *     aac_consumer_free       -       free consumer entry
+ *     @dev: Adapter
+ *     @q: Queue
+ *     @qid: Queue ident
+ *
+ *     Frees up the current top of the queue we are a consumer of. If the
+ *     queue was full notify the producer that the queue is no longer full.
+ */
+
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
+{
+    int wasfull = 0;
+    u32 notify;
+
+    /* Fixed: compare in CPU byte order. Adding 1 to a raw little-endian
+     * value gave the wrong answer on big-endian hosts (the conversions
+     * below already do this correctly). */
+    if (le32_to_cpu(*q->headers.producer)+1 == le32_to_cpu(*q->headers.consumer))
+       wasfull = 1;
+        
+    /* Advance the consumer index, wrapping back to the start. */
+    if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+       *q->headers.consumer = cpu_to_le32(1);
+    else
+       *q->headers.consumer = 
+           cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
+        
+    if (wasfull) {
+       switch (qid) {
+
+       case HostNormCmdQueue:
+           notify = HostNormCmdNotFull;
+           break;
+       case HostHighCmdQueue:
+           notify = HostHighCmdNotFull;
+           break;
+       case HostNormRespQueue:
+           notify = HostNormRespNotFull;
+           break;
+       case HostHighRespQueue:
+           notify = HostHighRespNotFull;
+           break;
+       default:
+           BUG();
+           return;
+       }
+       /* Tell the adapter it may produce into this queue again. */
+       aac_adapter_notify(dev, notify);
+    }
+}        
+
+/**
+ *     fib_adapter_complete    -       complete adapter issued fib
+ *     @fibptr: fib to complete
+ *     @size: size of fib data area
+ *
+ *     Will do all necessary work to complete a FIB that was sent from
+ *     the adapter: the fib is queued back to the adapter on the response
+ *     queue matching its priority.
+ *
+ *     Returns 0 on success or a negative errno.
+ */
+
+int fib_adapter_complete(struct fib * fibptr, unsigned short size)
+{
+    struct hw_fib * fib = fibptr->fib;
+    struct aac_dev * dev = fibptr->dev;
+    unsigned long nointr = 0;
+
+    /* An idle fib (XferState == 0) needs no completion work. */
+    if (le32_to_cpu(fib->header.XferState) == 0)
+       return 0;
+    /*
+     * If we plan to do anything check the structure type first.
+     */ 
+    if ( fib->header.StructType != FIB_MAGIC ) {
+       return -EINVAL;
+    }
+    /*
+     * This block handles the case where the adapter had sent us a
+     * command and we have finished processing the command. We
+     * call completeFib when we are done processing the command 
+     * and want to send a response back to the adapter. This will 
+     * send the completed cdb to the adapter.
+     */
+    if (fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+       fib->header.XferState |= cpu_to_le32(HostProcessed);
+       if (fib->header.XferState & cpu_to_le32(HighPriority)) {
+           u32 index;
+           if (size) 
+           {
+               size += sizeof(struct aac_fibhdr);
+               if (size > le16_to_cpu(fib->header.SenderSize))
+                   return -EMSGSIZE;
+               fib->header.Size = cpu_to_le16(size);
+           }
+           if(aac_queue_get(dev, &index, AdapHighRespQueue, 
+                            fib, 1, NULL, &nointr) < 0) {
+               return -EWOULDBLOCK;
+           }
+           /* Notify failure is not fatal: the entry is already queued. */
+           aac_insert_entry(dev, index, AdapHighRespQueue,  
+                            (nointr & (int)aac_config.irq_mod));
+       }
+       /* Fixed: test against the byte-swapped flag, matching the
+        * HighPriority test above (raw flag only worked on LE hosts). */
+       else if (fib->header.XferState & cpu_to_le32(NormalPriority)) 
+       {
+           u32 index;
+
+           if (size) {
+               size += sizeof(struct aac_fibhdr);
+               if (size > le16_to_cpu(fib->header.SenderSize)) 
+                   return -EMSGSIZE;
+               fib->header.Size = cpu_to_le16(size);
+           }
+           if (aac_queue_get(dev, &index, AdapNormRespQueue, 
+                             fib, 1, NULL, &nointr) < 0) 
+               return -EWOULDBLOCK;
+           aac_insert_entry(dev, index, AdapNormRespQueue, 
+                            (nointr & (int)aac_config.irq_mod));
+       }
+    }
+    else 
+    {
+       printk(KERN_WARNING 
+              "fib_adapter_complete: Unknown xferstate detected.\n");
+       BUG();
+    }   
+    return 0;
+}
+
+/**
+ *     fib_complete    -       fib completion handler
+ *     @fibptr: FIB to complete
+ *
+ *     Will do all necessary work to complete a FIB that originated on
+ *     the host.  Returns 0 on success, -EINVAL if the hardware fib
+ *     header is not stamped with FIB_MAGIC.
+ */
+int fib_complete(struct fib * fibptr)
+{
+    struct hw_fib * fib = fibptr->fib;
+
+    /*
+     * Check for a fib which has already been completed
+     */
+
+    if (fib->header.XferState == cpu_to_le32(0))
+       return 0;
+    /*
+     * If we plan to do anything check the structure type first.
+     */ 
+
+    if (fib->header.StructType != FIB_MAGIC)
+       return -EINVAL;
+    /*
+     * This block completes a cdb which orginated on the host and we 
+     * just need to deallocate the cdb or reinit it. At this point the
+     * command is complete that we had sent to the adapter and this
+     * cdb could be reused.
+     *
+     * NOTE(review): all three reachable branches call fib_dealloc();
+     * they are kept separate only to document the distinct cases.
+     */
+    if((fib->header.XferState & cpu_to_le32(SentFromHost)) &&
+       (fib->header.XferState & cpu_to_le32(AdapterProcessed)))
+    {
+       fib_dealloc(fibptr);
+    }
+    else if(fib->header.XferState & cpu_to_le32(SentFromHost))
+    {
+       /*
+        *      This handles the case when the host has aborted the I/O
+        *      to the adapter because the adapter is not responding
+        */
+       fib_dealloc(fibptr);
+    } else if(fib->header.XferState & cpu_to_le32(HostOwned)) {
+       fib_dealloc(fibptr);
+    } else {
+       BUG();
+    }   
+    return 0;
+}
+
+/**
+ *     aac_printf      -       handle printf from firmware
+ *     @dev: Adapter
+ *     @val: Message info (low 16 bits: length, high 16 bits: level)
+ *
+ *     Print a message passed to us by the controller firmware on the
+ *     Adaptec board, then clear the firmware message buffer.
+ */
+
+void aac_printf(struct aac_dev *dev, u32 val)
+{
+    char *msg = dev->printfbuf;
+    int len = val & 0xffff;
+    int severity = (val >> 16) & 0xffff;
+       
+    /*
+     * The printfbuf is 256 bytes (sized in port.c; there is no named
+     * constant for it).  Clamp the length and force NUL termination.
+     */
+    if (len > 255)
+       len = 255;
+    if (msg[len] != 0)
+       msg[len] = 0;
+
+    if (severity == LOG_HIGH_ERROR)
+       printk(KERN_WARNING "aacraid:%s", msg);
+    else
+       printk(KERN_INFO "aacraid:%s", msg);
+
+    /* Wipe the shared buffer for the next firmware message. */
+    memset(msg, 0, 256);
+}
+
+
+/**
+ *     aac_handle_aif          -       Handle a message from the firmware
+ *     @dev: Which adapter this fib is from
+ *     @fibptr: Pointer to fibptr from adapter
+ *
+ *     Handles a driver-notify fib from the adapter.  No real dispatch is
+ *     implemented yet: the fib is simply acknowledged with ST_OK and
+ *     returned to the adapter.
+ */
+
+static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+{
+    u32 *status = (u32 *)fibptr->fib->data;
+
+    /*
+     * Acknowledge with ST_OK.  (To reject instead, this would be
+     * ST_INVAL.)
+     */
+    *status = cpu_to_le32(ST_OK);
+    fib_adapter_complete(fibptr, sizeof(u32));
+}
+
+/**
+ *     aac_command_thread      -       command processing thread
+ *     @dev: Adapter to monitor
+ *
+ *     Drains the adapter's HostNormCmdQueue of AIF (adapter-initiated
+ *     fib) messages.  In this port the kernel-thread/waitqueue machinery
+ *     is disabled (#if 0), so each call processes whatever is currently
+ *     queued and then returns; fib_send() polls this while waiting.
+ */
+#ifndef TRY_SOFTIRQ
+int aac_command_thread(struct aac_dev * dev)
+{
+#else
+int aac_command_thread(struct softirq_action *h)
+{   
+    struct aac_dev *dev = (struct aac_dev *)h->data; 
+#endif
+    struct hw_fib *fib, *newfib;
+    struct fib fibptr; /* for error logging */
+    struct aac_queue_block *queues = dev->queues;
+    struct aac_fib_context *fibctx;
+    unsigned long flags;
+#if 0
+    DECLARE_WAITQUEUE(wait, current);
+#endif
+
+    /*
+     * We can only have one thread per adapter for AIF's.
+     */
+    printk("aac_command_'thread': entered.\n"); 
+    if (dev->aif_thread)
+       return -EINVAL;
+
+#if 0
+    /*
+     * Set up the name that will appear in 'ps'
+     * stored in  task_struct.comm[16].
+     */
+    sprintf(current->comm, "aacraid");
+    daemonize();
+#endif
+
+    /*
+     * Let the DPC know it has a place to send the AIF's to.
+     */
+    dev->aif_thread = 1;
+    memset(&fibptr, 0, sizeof(struct fib));
+#if 0
+    add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+    set_current_state(TASK_INTERRUPTIBLE);
+#endif
+//    while(1) 
+    {
+
+       printk("aac_command_thread: in 'loop'\n"); 
+       spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+       /* Fixed: flags is unsigned long, so it needs %lx, not %x. */
+       printk("flags = %lx\n", flags); 
+       while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+           struct list_head *entry;
+           struct aac_aifcmd * aifcmd;
+
+#if 0
+           set_current_state(TASK_RUNNING);
+#endif
+
+               
+           entry = queues->queue[HostNormCmdQueue].cmdq.next;
+           list_del(entry);
+                       
+           spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock,flags);
+           fib = list_entry(entry, struct hw_fib, header.FibLinks);
+           printk("aac_command_thread: got fib \n"); 
+           /*
+            *  We will process the FIB here or pass it to a 
+            *  worker thread that is TBD. We Really can't 
+            *  do anything at this point since we don't have
+            *  anything defined for this thread to do.
+            */
+           memset(&fibptr, 0, sizeof(struct fib));
+           fibptr.type = FSAFS_NTC_FIB_CONTEXT;
+           fibptr.size = sizeof( struct fib );
+           fibptr.fib = fib;
+           fibptr.data = fib->data;
+           fibptr.dev = dev;
+           /*
+            *  We only handle AifRequest fibs from the adapter.
+            */
+           aifcmd = (struct aac_aifcmd *) fib->data;
+           if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) {
+               printk("aac_command_thread: handling aif... :-( \n"); 
+               aac_handle_aif(dev, &fibptr);
+           } else {
+               /* The u32 here is important and intended. We are using
+                  32bit wrapping time to fit the adapter field */
+               u32 time_now, time_last;
+               unsigned long flagv;
+               
+               time_now = jiffies/HZ;
+
+               spin_lock_irqsave(&dev->fib_lock, flagv);
+               entry = dev->fib_list.next;
+                               /*
+                                * For each Context that is on the 
+                                * fibctxList, make a copy of the
+                                * fib, and then set the event to wake up the
+                                * thread that is waiting for it.
+                                */
+               while (entry != &dev->fib_list) {
+                   /*
+                    * Extract the fibctx
+                    */
+                   fibctx = list_entry(entry, struct aac_fib_context, next);
+                   /*
+                    * Check if the queue is getting
+                    * backlogged
+                    */
+                   if (fibctx->count > 20)
+                   {
+                       time_last = fibctx->jiffies;
+                       /*
+                        * Has it been > 2 minutes 
+                        * since the last read off
+                        * the queue?
+                        */
+                       if ((time_now - time_last) > 120) {
+                           entry = entry->next;
+                           aac_close_fib_context(dev, fibctx);
+                           continue;
+                       }
+                   }
+                   /*
+                    * Warning: no sleep allowed while
+                    * holding spinlock
+                    */
+                   newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+                   if (newfib) {
+                       /*
+                        * Make the copy of the FIB
+                        */
+                       memcpy(newfib, fib, sizeof(struct hw_fib));
+                       /*
+                        * Put the FIB onto the
+                        * fibctx's fibs
+                        */
+                       list_add_tail(&newfib->header.FibLinks, &fibctx->fibs);
+                       fibctx->count++;
+#if 0
+                       /* 
+                        * Set the event to wake up the
+                        * thread that will waiting.
+                        */
+                       up(&fibctx->wait_sem);
+#endif
+                   } else {
+                       printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+                   }
+                   entry = entry->next;
+               }
+                               /*
+                                *      Set the status of this FIB
+                                */
+               *(u32 *)fib->data = cpu_to_le32(ST_OK);
+               fib_adapter_complete(&fibptr, sizeof(u32));
+               spin_unlock_irqrestore(&dev->fib_lock, flagv);
+           }
+           spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+       }
+       /*
+        *      There are no more AIF's
+        */
+       spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+#if 0
+       schedule();
+
+       if(signal_pending(current))
+           break;
+       set_current_state(TASK_INTERRUPTIBLE);
+#endif
+
+    }
+    
+#if 0
+    remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+    dev->aif_thread = 0;
+    complete_and_exit(&dev->aif_completion, 0);
+#else
+    mdelay(50); 
+    dev->aif_thread = 0;
+
+#endif
+    return 0;
+}
diff --git a/xen-2.4.16/drivers/scsi/aacraid/dpcsup.c b/xen-2.4.16/drivers/scsi/aacraid/dpcsup.c
new file mode 100644 (file)
index 0000000..c9b4dfe
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  dpcsup.c
+ *
+ * Abstract: All DPC processing routines for the cyclone board occur here.
+ *
+ *
+ */
+
+#include <xeno/config.h>
+/* #include <xeno/kernel.h> */
+#include <xeno/init.h>
+#include <xeno/types.h>
+#include <xeno/sched.h>
+#include <xeno/pci.h>
+/*  #include <xeno/spinlock.h> */
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/completion.h> */
+#include <xeno/blk.h>
+/*  #include <asm/semaphore.h> */
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+/**
+ *     aac_response_normal     -       Handle command replies
+ *     @q: Queue to read from
+ *
+ *     This DPC routine will be run when the adapter interrupts us to let us
+ *     know there is a response on our normal priority queue. We will pull off
+ *     all QE there are and wake up all the waiters before exiting. We will
+ *     take a spinlock out on the queue before operating on it.
+ */
+
+unsigned int aac_response_normal(struct aac_queue * q)
+{
+       struct aac_dev * dev = q->dev;
+       struct aac_entry *entry;
+       struct hw_fib * hwfib;
+       struct fib * fib;
+       int consumed = 0;
+       unsigned long flags;
+
+       /* Held across the loop head; dropped/re-taken in the body below. */
+       spin_lock_irqsave(q->lock, flags);      
+
+       /*
+        *      Keep pulling response QEs off the response queue and waking
+        *      up the waiters until there are no more QEs. We then return
+        *      back to the system. If no response was requesed we just
+        *      deallocate the Fib here and continue.
+        */
+       while(aac_consumer_get(dev, q, &entry))
+       {
+               int fast;
+
+               /* Bit 0 of the entry address flags a "fast" completion
+                * carrying no real response payload from the adapter. */
+               fast = (int) (entry->addr & 0x01);
+               hwfib = addr2fib(entry->addr & ~0x01);
+               aac_consumer_free(dev, q, HostNormRespQueue);
+               /* SenderData was set by fib_send() to the fib pool index. */
+               fib = &dev->fibs[hwfib->header.SenderData];
+               /*
+                *      Remove this fib from the Outstanding I/O queue.
+                *      But only if it has not already been timed out.
+                *
+                *      If the fib has been timed out already, then just 
+                *      continue. The caller has already been notified that
+                *      the fib timed out.
+                */
+               if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+                       list_del(&fib->queue);
+                       dev->queues->queue[AdapNormCmdQueue].numpending--;
+               } else {
+                       /* continue with q->lock still held, as the loop
+                        * head expects */
+                       printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
+                       continue;
+               }
+               spin_unlock_irqrestore(q->lock, flags);
+
+               if (fast) {
+                       /*
+                        *      Doctor the fib
+                        */
+                       *(u32 *)hwfib->data = cpu_to_le32(ST_OK);
+                       hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+               }
+
+               FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+               if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
+               {
+                       u32 *pstatus = (u32 *)hwfib->data;
+                       if (*pstatus & cpu_to_le32(0xffff0000))
+                               *pstatus = cpu_to_le32(ST_OK);
+               }
+               if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) 
+               {
+                       if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
+                               FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+                       else 
+                               FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+                       /*
+                        *      NOTE:  we cannot touch the fib after this
+                        *          call, because it may have been deallocated.
+                        */
+                       fib->callback(fib->callback_data, fib);
+               } else {
+#if 0
+                       unsigned long flagv;
+                       spin_lock_irqsave(&fib->event_lock, flagv);
+#endif
+                       /* Synchronous completion: fib_send() polls ->done. */
+                       fib->done = 1;
+#if 0
+                       up(&fib->event_wait);
+                       spin_unlock_irqrestore(&fib->event_lock, flagv);
+#endif
+                       FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+               }
+               consumed++;
+               spin_lock_irqsave(q->lock, flags);
+       }
+
+       /* Book-keeping for driver statistics. */
+       if (consumed > aac_config.peak_fibs)
+               aac_config.peak_fibs = consumed;
+       if (consumed == 0) 
+               aac_config.zero_fibs++;
+
+       spin_unlock_irqrestore(q->lock, flags);
+       return 0;
+}
+
+
+/**
+ *     aac_command_normal      -       handle commands
+ *     @q: queue to process
+ *
+ *     This DPC routine will be queued when the adapter interrupts us to 
+ *     let us know there is a command on our normal priority queue. We will 
+ *     pull off all QE there are and wake up all the waiters before exiting.
+ *     We will take a spinlock out on the queue before operating on it.
+ */
+unsigned int aac_command_normal(struct aac_queue *q)
+{
+       struct aac_dev * dev = q->dev;
+       struct aac_entry *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->lock, flags);
+
+       /*
+        *      Keep pulling response QEs off the response queue and waking
+        *      up the waiters until there are no more QEs. We then return
+        *      back to the system.
+        */
+       while(aac_consumer_get(dev, q, &entry))
+       {
+               struct hw_fib * fib;
+               fib = addr2fib(entry->addr);
+
+               if (dev->aif_thread) {
+                       /* Hand the fib to aac_command_thread() via cmdq. */
+                       list_add_tail(&fib->header.FibLinks, &q->cmdq);
+                       aac_consumer_free(dev, q, HostNormCmdQueue);
+#if 0
+                       wake_up_interruptible(&q->cmdready);
+#endif
+               } else {
+                       /* No AIF thread registered: acknowledge inline with
+                        * ST_OK, dropping the lock around the completion. */
+                       struct fib fibctx;
+                       aac_consumer_free(dev, q, HostNormCmdQueue);
+                       spin_unlock_irqrestore(q->lock, flags);
+                       memset(&fibctx, 0, sizeof(struct fib));
+                       fibctx.type = FSAFS_NTC_FIB_CONTEXT;
+                       fibctx.size = sizeof(struct fib);
+                       fibctx.fib = fib;
+                       fibctx.data = fib->data;
+                       fibctx.dev = dev;
+                       /*
+                        *      Set the status of this FIB
+                        */
+                       *(u32 *)fib->data = cpu_to_le32(ST_OK);
+                       fib_adapter_complete(&fibctx, sizeof(u32));
+                       spin_lock_irqsave(q->lock, flags);
+               }               
+       }
+       spin_unlock_irqrestore(q->lock, flags);
+       return 0;
+}
diff --git a/xen-2.4.16/drivers/scsi/aacraid/linit.c b/xen-2.4.16/drivers/scsi/aacraid/linit.c
new file mode 100644 (file)
index 0000000..b5026d9
--- /dev/null
@@ -0,0 +1,794 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *   linit.c
+ *
+ * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
+ *                             
+ *     Provides the following driver entry points:
+ *             aac_detect()
+ *             aac_release()
+ *             aac_queuecommand()
+ *             aac_resetcommand()
+ *             aac_biosparm()
+ *     
+ */
+
+#define AAC_DRIVER_VERSION             "0.9.9ac6-TEST"
+#define AAC_DRIVER_BUILD_DATE          __DATE__
+
+#include <xeno/module.h>
+#include <xeno/config.h>
+#include <xeno/kernel.h>
+#include <xeno/init.h>
+#include <xeno/types.h>
+#include <xeno/sched.h>
+#include <xeno/pci.h>
+#include <xeno/spinlock.h>
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/completion.h> */
+/*  #include <asm/semaphore.h> */
+#include <xeno/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+#include "sd.h"
+
+#define AAC_DRIVERNAME "aacraid"
+
+MODULE_AUTHOR("Red Hat Inc and Adaptec");
+MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, PERC 320/DC, Adaptec 2120S, 2200S, 5400S, and HP NetRAID-4M devices. http://domsch.com/xeno/ or http://linux.adaptec.com");
+MODULE_LICENSE("GPL");
+MODULE_PARM(nondasd, "i");
+MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
+
+static int nondasd=-1;
+
+struct aac_dev *aac_devices[MAXIMUM_NUM_ADAPTERS];
+
+static unsigned aac_count = 0;
+static int aac_cfg_major = -1;
+
+/*
+ * Because of the way Linux names scsi devices, the order in this table has
+ * become important.  Check for on-board Raid first, add-in cards second.
+ *
+ * dmb - For now we add the number of channels to this structure.  
+ * In the future we should add a fib that reports the number of channels
+ * for the card.  At that time we can remove the channels from here
+ */
+/*
+ * Per-entry fields (order inferred from the initializers -- NOTE(review):
+ * confirm against struct aac_driver_ident in aacraid.h): PCI vendor, PCI
+ * device, subsystem vendor, subsystem device, miniport init routine,
+ * driver name, SCSI INQUIRY vendor string, SCSI INQUIRY product string,
+ * and the hard-coded channel count used when Non-DASD support is enabled.
+ */
+static struct aac_driver_ident aac_drivers[] = {
+       { 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 2/Si */
+       { 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
+       { 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Si */
+       { 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Si */
+       { 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
+       { 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
+       { 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
+       { 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
+       { 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
+       { 0x9005, 0x0283, 0x9005, 0x0283, aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2 }, /* catapult*/
+       { 0x9005, 0x0284, 0x9005, 0x0284, aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2 }, /* tomcat*/
+       { 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1 }, /* Adaptec 2120S (Crusader)*/
+       { 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2 }, /* Adaptec 2200S (Vulcan)*/
+       { 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2 }, /* Adaptec 2200S (Vulcan-2m)*/
+       { 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* Dell PERC 320/DC */
+       { 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4 }, /* Adaptec 5400S (Mustang)*/
+       { 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4 }, /* Adaptec 5400S (Mustang)*/
+       { 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4 }, /* Dell PERC2 "Quad Channel" */
+       { 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid",  "HP      ", "NetRAID-4M      ", 4 }  /* HP NetRAID-4M */
+};
+
+#define NUM_AACTYPES   (sizeof(aac_drivers) / sizeof(struct aac_driver_ident))
+static int num_aacdrivers = NUM_AACTYPES;
+
+#if 0
+static int aac_cfg_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg);
+static int aac_cfg_open(struct inode * inode, struct file * file);
+static int aac_cfg_release(struct inode * inode,struct file * file);
+
+static struct file_operations aac_cfg_fops = {
+/*     owner: THIS_MODULE, */
+       ioctl: aac_cfg_ioctl,
+       open: aac_cfg_open,
+       release: aac_cfg_release
+};
+#endif
+
+static int aac_detect(Scsi_Host_Template *);
+static int aac_release(struct Scsi_Host *);
+static int aac_queuecommand(Scsi_Cmnd *, void (*CompletionRoutine)(Scsi_Cmnd *));
+static int aac_biosparm(Scsi_Disk *, kdev_t, int *);
+#ifdef CONFIG_PROC_FS
+static int aac_procinfo(char *, char **, off_t, int, int, int);
+#endif
+static int aac_ioctl(Scsi_Device *, int, void *);
+static int aac_eh_abort(Scsi_Cmnd * cmd);
+static int aac_eh_device_reset(Scsi_Cmnd* cmd);
+static int aac_eh_bus_reset(Scsi_Cmnd* cmd);
+static int aac_eh_reset(Scsi_Cmnd* cmd);
+
+static void aac_queuedepth(struct Scsi_Host *, Scsi_Device *);
+
+/**
+ *     aac_detect      -       Probe for aacraid cards
+ *     @template: SCSI driver template
+ *
+ *     Probe for AAC Host Adapters initialize, register, and report the 
+ *     configuration of each AAC Host Adapter found.
+ *     Returns the number of adapters successfully initialized and 
+ *     registered.
+ *     Initializes all data necessary for this particular SCSI driver.
+ *     Notes:
+ *     The detect routine must not call any of the mid level functions 
+ *     to queue commands because things are not guaranteed to be set 
+ *     up yet. The detect routine can send commands to the host adapter 
+ *     as long as the program control will not be passed to scsi.c in 
+ *     the processing of the command. Note especially that 
+ *     scsi_malloc/scsi_free must not be called.
+ *
+ */
+static int aac_detect(Scsi_Host_Template *template)
+{
+    int index;
+    int container;
+    u16 vendor_id, device_id;
+    struct Scsi_Host *host_ptr;
+    struct pci_dev *dev = NULL;
+    struct aac_dev *aac;
+    struct fsa_scsi_hba *fsa_dev_ptr;
+    char *name = NULL;
+       
+    printk(KERN_INFO "Red Hat/Adaptec aacraid driver, %s\n", 
+          AAC_DRIVER_BUILD_DATE);
+
+
+    /* 
+    ** XXX SMH: we need to take interrupts during detect, but the SCSI 
+    ** layer is holding this lock with interrupts disabled. I don't 
+    ** know how this works on vanilla linux (we 'down' on a semaphone 
+    ** at one point during the process -- how do we wake?) 
+    */
+    spin_unlock_irq(&io_request_lock);
+
+
+    /* setting up the proc directory structure */
+    template->proc_name = "aacraid";
+
+    for( index = 0; index != num_aacdrivers; index++ )
+    {
+       device_id = aac_drivers[index].device;
+       vendor_id = aac_drivers[index].vendor;
+       name = aac_drivers[index].name;
+       dprintk((KERN_DEBUG "Checking %s %x/%x/%x/%x.\n", 
+                name, vendor_id, device_id,
+                aac_drivers[index].subsystem_vendor,
+                aac_drivers[index].subsystem_device));
+
+       dev = NULL;
+       while((dev = pci_find_device(vendor_id, device_id, dev))) {
+           if (pci_enable_device(dev))
+               continue;
+           pci_set_master(dev);
+           pci_set_dma_mask(dev, 0xFFFFFFFFULL);
+
+           if((dev->subsystem_vendor != aac_drivers[index].subsystem_vendor) || 
+              (dev->subsystem_device != aac_drivers[index].subsystem_device))
+               continue;
+
+           dprintk((KERN_DEBUG "%s device detected.\n", name));
+           dprintk((KERN_DEBUG "%x/%x/%x/%x.\n", vendor_id, device_id, 
+                    aac_drivers[index].subsystem_vendor, 
+                    aac_drivers[index].subsystem_device));
+           /* Increment the host adapter count */
+           aac_count++;
+           /*
+            * scsi_register() allocates memory for a Scsi_Hosts
+            * structure and links it into the linked list of host
+            * adapters. This linked list contains the data for all
+            * possible <supported> scsi hosts.  This is similar to
+            * the Scsi_Host_Template, except that we have one entry
+            * for each actual physical host adapter on the system,
+            * stored as a linked list. If there are two AAC boards,
+            * then we will need to make two Scsi_Host entries, but
+            * there will be only one Scsi_Host_Template entry. The
+            * second argument to scsi_register() specifies the size
+            * of the extra memory we want to hold any device specific
+            * information.  */
+           host_ptr = scsi_register( template, sizeof(struct aac_dev) );
+           /* 
+            * These three parameters can be used to allow for wide SCSI 
+            * and for host adapters that support multiple buses.
+            */
+           host_ptr->max_id = 17;
+           host_ptr->max_lun = 8;
+           host_ptr->max_channel = 1;
+           host_ptr->irq = dev->irq;           /* Adapter IRQ number */
+           /* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */
+           host_ptr->base = dev->resource[0].start;
+           scsi_set_pci_device(host_ptr, dev);
+           dprintk((KERN_DEBUG "Device base address = 0x%lx [0x%lx].\n", 
+                    host_ptr->base, dev->resource[0].start));
+           dprintk((KERN_DEBUG "Device irq = 0x%x.\n", dev->irq));
+           /*
+            * The unique_id field is a unique identifier that must
+            * be assigned so that we have some way of identifying
+            * each host adapter properly and uniquely. For hosts 
+            * that do not support more than one card in the
+            * system, this does not need to be set. It is
+            * initialized to zero in scsi_register(). This is the 
+            * value returned as aac->id.
+            */
+           host_ptr->unique_id = aac_count - 1;
+           /*
+            *  This function is called after the device list has
+            *  been built to find the tagged queueing depth 
+            *  supported for each device.
+            */
+           host_ptr->select_queue_depths = aac_queuedepth;
+           aac = (struct aac_dev *)host_ptr->hostdata;
+           /* attach a pointer back to Scsi_Host */
+           aac->scsi_host_ptr = host_ptr;      
+           aac->pdev = dev;
+           aac->cardtype =  index;
+           aac->name = aac->scsi_host_ptr->hostt->name;
+           aac->id = aac->scsi_host_ptr->unique_id;
+           /* Initialize the ordinal number of the device to -1 */
+           fsa_dev_ptr = &(aac->fsa_dev);
+           for( container=0; container < MAXIMUM_NUM_CONTAINERS; container++)
+               fsa_dev_ptr->devno[container] = -1;
+
+           dprintk((KERN_DEBUG "Initializing Hardware...\n"));
+
+           if((*aac_drivers[index].init)(aac , host_ptr->unique_id) != 0)
+           {
+               /* device initialization failed */
+               printk(KERN_WARNING 
+                      "aacraid: device initialization failed.\n");
+               scsi_unregister(host_ptr);
+               aac_count--;
+               continue;
+           } 
+           dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", 
+                    name, host_ptr->unique_id));
+           aac_get_adapter_info(aac);
+
+           dprintk((KERN_DEBUG "%s got adapter info.\n", name));
+
+           if(nondasd != -1) 
+           {
+               /* someone told us how to set this on the cmdline */
+               aac->nondasd_support = (nondasd!=0);
+           }
+           if(aac->nondasd_support != 0){
+               printk(KERN_INFO "%s%d: Non-DASD support enabled\n", 
+                      aac->name, aac->id);
+           }
+           dprintk((KERN_DEBUG "%s:%d options flag %04x.\n", name, 
+                    host_ptr->unique_id, aac->adapter_info.options));
+           if(aac->nondasd_support == 1)
+           {
+               /*
+                * max channel will be the physical
+                * channels plus 1 virtual channel all
+                * containers are on the virtual
+                * channel 0 physical channels are
+                * address by their actual physical
+                * number+1 */
+               host_ptr->max_channel = aac_drivers[index].channels+1;
+           } else {
+               host_ptr->max_channel = 1;
+           }
+           dprintk((KERN_DEBUG "Device has %d logical channels\n", 
+                    host_ptr->max_channel));
+           aac_get_containers(aac);
+           aac_devices[aac_count-1] = aac;
+
+           /*
+            * dmb - we may need to move these 3 parms somewhere else once
+            * we get a fib that can report the actual numbers
+            */
+           host_ptr->max_id = AAC_MAX_TARGET;
+           host_ptr->max_lun = AAC_MAX_LUN;
+                       
+           /*
+            *  If we are PAE capable then our future DMA mappings
+            *  (for read/write commands) are 64bit clean and don't 
+            *  need bouncing. This assumes we do no other 32bit only
+            *  allocations (eg fib table expands) after this point.
+            */
+                        
+           if(aac->pae_support)
+               pci_set_dma_mask(dev, 0xFFFFFFFFFFFFFFFFUL);
+       }
+    }
+
+    /* XXX SMH: restore lock and IPL for SCSI layer */
+    spin_lock_irq(&io_request_lock);
+
+
+#if 0
+    if( aac_count ){
+       if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0)
+           printk(KERN_WARNING "aacraid: unable to register 'aac' device.\n");
+    }
+#endif
+
+    template->present = aac_count; /* # of cards of this type found */
+    printk(KERN_DEBUG "aac_detect: returning %d\n", aac_count); 
+    return aac_count;
+}
+
+/**
+ *     aac_release     -       release SCSI host resources
+ *     @host_ptr: SCSI host to clean up
+ *
+ *     Release all resources previously acquired to support a specific Host 
+ *     Adapter and unregister the AAC Host Adapter.
+ *
+ *     BUGS: Does not wait for the thread it kills to die.
+ */
+
+static int aac_release(struct Scsi_Host *host_ptr)
+{
+    struct aac_dev *dev;
+    dprintk((KERN_DEBUG "aac_release.\n"));
+    dev = (struct aac_dev *)host_ptr->hostdata;
+    
+    /*
+     * NOTE(review): thread shutdown is compiled out below, so any AIF
+     * thread started at init time is never stopped here -- confirm before
+     * allowing module unload.
+     */
+#if 0
+    /*
+     * kill any threads we started
+     */
+    kill_proc(dev->thread_pid, SIGKILL, 0);
+    wait_for_completion(&dev->aif_completion);
+#endif
+    /*
+     * Call the comm layer to detach from this adapter
+     */
+    aac_detach(dev);
+    /* Check free orderings... */
+    /* remove interrupt binding */
+    free_irq(host_ptr->irq, dev);
+    /*
+     * NOTE(review): regs.sa is unmapped for every card type -- presumably
+     * the rx/sa register views share one mapped base; confirm.
+     */
+    iounmap((void * )dev->regs.sa);
+    /* unregister adapter */
+    scsi_unregister(host_ptr);
+    /*
+     * FIXME: This assumes no hot plugging is going on...
+     */
+    if( aac_cfg_major >= 0 )
+    {
+#if 0
+       unregister_chrdev(aac_cfg_major, "aac");
+#endif
+       aac_cfg_major = -1;
+    }
+    return 0;
+}
+
+/**
+ *     aac_queuecommand        -       queue a SCSI command
+ *     @scsi_cmnd_ptr: SCSI command to queue
+ *     @CompletionRoutine: Function to call on command completion
+ *
+ *     Queues a command for execution by the associated Host Adapter.
+ */ 
+
+static int aac_queuecommand(Scsi_Cmnd *scsi_cmnd_ptr, void (*complete)(Scsi_Cmnd *))
+{
+    int status;
+
+    /* Remember the midlayer completion callback on the command itself. */
+    scsi_cmnd_ptr->scsi_done = complete;
+
+    /*
+     * aac_scsi_cmd() does all command processing, including setting the
+     * result code and invoking the completion routine.
+     */
+    status = aac_scsi_cmd(scsi_cmnd_ptr);
+    if (status != 0)
+       dprintk((KERN_DEBUG "aac_scsi_cmd failed.\n"));
+    return status;
+} 
+
+/**
+ *     aac_driverinfo          -       Returns the host adapter name
+ *     @host_ptr:      Scsi host to report on
+ *
+ *     Returns a static string describing the device in question
+ */
+
+const char *aac_driverinfo(struct Scsi_Host *host_ptr)
+{
+    /* Look up the static name string for this host's card type. */
+    struct aac_dev *adapter;
+
+    adapter = (struct aac_dev *)host_ptr->hostdata;
+    return aac_drivers[adapter->cardtype].name;
+}
+
+/**
+ *     aac_get_driver_ident
+ *     @devtype: index into lookup table
+ *
+ *     Returns a pointer to the entry in the driver lookup table.
+ */
+struct aac_driver_ident* aac_get_driver_ident(int devtype)
+{
+       /* Index straight into the static ident table; no validation here. */
+       return aac_drivers + devtype;
+}
+
+/**
+ *     aac_biosparm    -       return BIOS parameters for disk
+ *     @disk: SCSI disk object to process
+ *     @device: kdev_t of the disk in question
+ *     @geom: geometry block to fill in
+ *
+ *     Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.  
+ *     The default disk geometry is 64 heads, 32 sectors, and the appropriate 
+ *     number of cylinders so as not to exceed drive capacity.  In order for 
+ *     disks equal to or larger than 1 GB to be addressable by the BIOS
+ *     without exceeding the BIOS limitation of 1024 cylinders, Extended 
+ *     Translation should be enabled.   With Extended Translation enabled, 
+ *     drives between 1 GB inclusive and 2 GB exclusive are given a disk 
+ *     geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive 
+ *     are given a disk geometry of 255 heads and 63 sectors.  However, if 
+ *     the BIOS detects that the Extended Translation setting does not match 
+ *     the geometry in the partition table, then the translation inferred 
+ *     from the partition table will be used by the BIOS, and a warning may 
+ *     be displayed.
+ */
+static int aac_biosparm(Scsi_Disk *disk, kdev_t dev, int *geom)
+{
+    struct diskparm *param = (struct diskparm *)geom;
+    struct buffer_head * buf;
+    
+    dprintk((KERN_DEBUG "aac_biosparm.\n"));
+    
+    /*
+     * Assuming extended translation is enabled - #REVISIT#
+     */
+    if( disk->capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
+    {
+       if( disk->capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
+       {
+           param->heads = 255;
+           param->sectors = 63;
+       }
+       else
+       {
+           param->heads = 128;
+           param->sectors = 32;
+       }
+    }
+    else
+    {
+       param->heads = 64;
+       param->sectors = 32;
+    }
+    
+    param->cylinders = disk->capacity/(param->heads * param->sectors);
+       
+#if 0
+    /*
+     * Read the first 1024 bytes from the disk device
+     */
+    
+    buf = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, block_size(dev));
+    if(buf == NULL)
+       return 0;
+    /* 
+     * If the boot sector partition table is valid, search for a partition 
+     * table entry whose end_head matches one of the standard geometry 
+     * translations ( 64/32, 128/32, 255/63 ).
+     */
+#endif
+
+        
+    if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
+    {
+       struct partition *first = (struct partition * )(buf->b_data + 0x1be);
+       struct partition *entry = first;
+       int saved_cylinders = param->cylinders;
+       int num;
+       unsigned char end_head, end_sec;
+       
+       for(num = 0; num < 4; num++)
+       {
+           end_head = entry->end_head;
+           end_sec = entry->end_sector & 0x3f;
+           
+           if(end_head == 63)
+           {
+               param->heads = 64;
+               param->sectors = 32;
+               break;
+           }
+           else if(end_head == 127)
+           {
+               param->heads = 128;
+               param->sectors = 32;
+               break;
+           }
+           else if(end_head == 254) 
+           {
+               param->heads = 255;
+               param->sectors = 63;
+               break;
+           }
+           entry++;
+       }
+       
+       if(num == 4)
+       {
+           end_head = first->end_head;
+           end_sec = first->end_sector & 0x3f;
+       }
+       
+       param->cylinders = disk->capacity / (param->heads * param->sectors);
+       
+       if(num < 4 && end_sec == param->sectors)
+       {
+           if(param->cylinders != saved_cylinders)
+               dprintk((KERN_DEBUG "Adopting geometry: heads=%d, "
+                        "sectors=%d from partition table %d.\n",
+                        param->heads, param->sectors, num));
+       }
+       else if(end_head > 0 || end_sec > 0)
+       {
+           dprintk((KERN_DEBUG "Strange geometry: heads=%d, "
+                    "sectors=%d in partition table %d.\n",
+                    end_head + 1, end_sec, num));
+           dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
+                    param->heads, param->sectors));
+       }
+    }
+#if 0
+    brelse(buf);
+#endif
+    return 0;
+}
+
+/**
+ *     aac_queuedepth          -       compute queue depths
+ *     @host:  SCSI host in question
+ *     @dev:   SCSI device we are considering
+ *
+ *     Selects queue depths for each target device based on the host adapter's
+ *     total capacity and the queue depth supported by the target device.
+ *     A queue depth of one automatically disables tagged queueing.
+ */
+
+static void aac_queuedepth(struct Scsi_Host * host, Scsi_Device * dev )
+{
+    Scsi_Device *sdev;
+    
+    dprintk((KERN_DEBUG "aac_queuedepth.\n"));
+    dprintk((KERN_DEBUG "Device #   Q Depth   Online\n"));
+    dprintk((KERN_DEBUG "---------------------------\n"));
+    /* Fix every device on this host at a depth of 10 commands. */
+    sdev = dev;
+    while (sdev != NULL)
+    {
+       if (sdev->host == host)
+       {
+           sdev->queue_depth = 10;
+           dprintk((KERN_DEBUG "  %2d         %d        %d\n", 
+                    sdev->id, sdev->queue_depth, sdev->online));
+       }
+       sdev = sdev->next;
+    }
+}
+
+
+/**
+ *     aac_eh_abort    -       Abort command if possible.
+ *     @cmd:   SCSI command block to abort
+ *
+ *     Called when the midlayer wishes to abort a command. We don't support
+ *     this facility, and our firmware looks after life for us. We just
+ *     report this as failing
+ */
+static int aac_eh_abort(Scsi_Cmnd *cmd)
+{
+    /* Per-command abort is unsupported; the firmware owns command life. */
+    return FAILED;
+}
+
+/**
+ *     aac_eh_device_reset     -       Reset command handling
+ *     @cmd:   SCSI command block causing the reset
+ *
+ *     Issue a reset of a SCSI device. We are ourselves not truely a SCSI
+ *     controller and our firmware will do the work for us anyway. Thus this
+ *     is a no-op. We just return FAILED.
+ */
+
+static int aac_eh_device_reset(Scsi_Cmnd *cmd)
+{
+    /* No real SCSI device behind us to reset; always report failure. */
+    return FAILED;
+}
+
+/**
+ *     aac_eh_bus_reset        -       Reset command handling
+ *     @scsi_cmd:      SCSI command block causing the reset
+ *
+ *     Issue a reset of a SCSI bus. We are ourselves not truely a SCSI
+ *     controller and our firmware will do the work for us anyway. Thus this
+ *     is a no-op. We just return FAILED.
+ */
+
+static int aac_eh_bus_reset(Scsi_Cmnd* cmd)
+{
+    /* No real SCSI bus behind us to reset; always report failure. */
+    return FAILED;
+}
+
+/**
+ *     aac_eh_hba_reset        -       Reset command handling
+ *     @scsi_cmd:      SCSI command block causing the reset
+ *
+ *     Issue a reset of a SCSI host. If things get this bad then arguably we should
+ *     go take a look at what the host adapter is doing and see if something really
+ *     broke (as can occur at least on my Dell QC card if a drive keeps failing spinup)
+ */
+
+static int aac_eh_reset(Scsi_Cmnd* cmd)
+{
+    /* Log loudly -- reaching the host-reset stage suggests a real hang. */
+    printk(KERN_ERR "aacraid: Host adapter reset request. SCSI hang ?\n");
+    return FAILED;
+}
+
+/**
+ *     aac_ioctl       -       Handle SCSI ioctls
+ *     @scsi_dev_ptr: scsi device to operate upon
+ *     @cmd: ioctl command to use issue
+ *     @arg: ioctl data pointer
+ *
+ *     Issue an ioctl on an aacraid device. Returns a standard unix error code or
+ *     zero for success
+ */
+static int aac_ioctl(Scsi_Device * scsi_dev_ptr, int cmd, void * arg)
+{
+    /* Resolve the adapter from the device's host and delegate. */
+    struct aac_dev *adapter = (struct aac_dev *)scsi_dev_ptr->host->hostdata;
+
+    dprintk((KERN_DEBUG "aac_ioctl.\n"));
+    return aac_do_ioctl(adapter, cmd, arg);
+}
+
+/**
+ *     aac_cfg_open            -       open a configuration file
+ *     @inode: inode being opened
+ *     @file: file handle attached
+ *
+ *     Called when the configuration device is opened. Does the needed
+ *     set up on the handle and then returns
+ *
+ *     Bugs: This needs extending to check a given adapter is present
+ *     so we can support hot plugging, and to ref count adapters.
+ */
+
+static int aac_cfg_open(struct inode * inode, struct file * file )
+{
+    /* The minor number selects the adapter; reject minors beyond the
+     * number of adapters actually detected. */
+    if (MINOR(inode->i_rdev) >= aac_count)
+       return -ENODEV;
+    return 0;
+}
+
+/**
+ *     aac_cfg_release         -       close down an AAC config device
+ *     @inode: inode of configuration file
+ *     @file: file handle of configuration file
+ *     
+ *     Called when the last close of the configuration file handle
+ *     is performed.
+ */
+static int aac_cfg_release(struct inode * inode, struct file * file )
+{
+    /* Nothing to tear down: open() takes no references on the adapter. */
+    return 0;
+}
+
+/**
+ *     aac_cfg_ioctl           -       AAC configuration request
+ *     @inode: inode of device
+ *     @file: file handle
+ *     @cmd: ioctl command code
+ *     @arg: argument
+ *
+ *     Handles a configuration ioctl. Currently this involves wrapping it
+ *     up and feeding it into the nasty windowsalike glue layer.
+ *
+ *     Bugs: Needs locking against parallel ioctls lower down
+ *     Bugs: Needs to handle hot plugging
+ */
+static int aac_cfg_ioctl(struct inode * inode,  struct file * file, unsigned int cmd, unsigned long arg )
+{
+    unsigned minor_number = MINOR(inode->i_rdev);
+
+    /*
+     * Bound-check the minor before indexing aac_devices[]: the old code
+     * indexed the array unchecked, reading out of bounds (or hitting a
+     * NULL slot) for minors beyond the detected adapter count.  This
+     * mirrors the check already done in aac_cfg_open().
+     */
+    if (minor_number >= aac_count)
+       return -ENODEV;
+    return aac_do_ioctl(aac_devices[minor_number], cmd, (void *)arg);
+}
+
+/*
+ *     To use the low level SCSI driver support using the linux kernel loadable 
+ *     module interface we should initialize the global variable driver_interface  
+ *     (datatype Scsi_Host_Template) and then include the file scsi_module.c.
+ */
+static Scsi_Host_Template driver_template = {
+/*     module:                 THIS_MODULE, */
+       name:                   "AAC",
+/*     proc_info:              aac_procinfo, */
+       detect:                 aac_detect,
+       release:                aac_release,
+       info:                   aac_driverinfo,
+       ioctl:                  aac_ioctl,
+       queuecommand:           aac_queuecommand,
+       bios_param:             aac_biosparm,   
+       /* Queue depth bounds come from the FIB pool size (AAC_NUM_IO_FIB). */
+       can_queue:              AAC_NUM_IO_FIB, 
+       this_id:                16,
+       sg_tablesize:           16,
+       max_sectors:            128,
+       cmd_per_lun:            AAC_NUM_IO_FIB,
+       /* All error handlers report FAILED -- see the aac_eh_* comments. */
+       eh_abort_handler:       aac_eh_abort,
+       eh_device_reset_handler:aac_eh_device_reset,
+       eh_bus_reset_handler:   aac_eh_bus_reset,
+       eh_host_reset_handler:  aac_eh_reset,
+       use_new_eh_code:        1, 
+
+       use_clustering:         ENABLE_CLUSTERING,
+};
+
+#include "../scsi_module.c.inc"
+
+#ifdef CONFIG_PROC_FS
+/**
+ *     aac_procinfo    -       Implement /proc/scsi/<drivername>/<n>
+ *     @proc_buffer: memory buffer for I/O
+ *     @start_ptr: pointer to first valid data
+ *     @offset: offset into file
+ *     @bytes_available: space left
+ *     @host_no: scsi host ident
+ *     @write: direction of I/O
+ *
+ *     Used to export driver statistics and other infos to the world outside 
+ *     the kernel using the proc file system. Also provides an interface to
+ *     feed the driver with information.
+ *
+ *             For reads
+ *                     - if offset > 0 return 0
+ *                     - if offset == 0 write data to proc_buffer and set the start_ptr to
+ *                     beginning of proc_buffer, return the number of characters written.
+ *             For writes
+ *                     - writes currently not supported, return 0
+ *
+ *     Bugs:   Only offset zero is handled
+ */
+
+static int aac_procinfo(char *proc_buffer, char **start_ptr,off_t offset,
+                       int bytes_available, int host_no, int write)
+{
+    /* Writes are not supported. */
+    if(write)
+       return 0;
+    /* All output fits in one read; only offset zero produces data. */
+    if(offset > 0)
+       return 0;
+    *start_ptr = proc_buffer;
+    return sprintf(proc_buffer, "%s  %d\n", 
+                  "Raid Controller, scsi hba number", host_no);
+}
+#endif
+
+EXPORT_NO_SYMBOLS;
diff --git a/xen-2.4.16/drivers/scsi/aacraid/rx.c b/xen-2.4.16/drivers/scsi/aacraid/rx.c
new file mode 100644 (file)
index 0000000..4ee6db4
--- /dev/null
@@ -0,0 +1,457 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  rx.c
+ *
+ * Abstract: Hardware miniport for Drawbridge specific hardware functions.
+ *
+ */
+
+#include <xeno/config.h>
+#include <xeno/kernel.h>
+#include <xeno/init.h>
+#include <xeno/types.h>
+#include <xeno/sched.h>
+#include <xeno/pci.h>
+/*  #include <xeno/spinlock.h> */
+/*  #include <xeno/slab.h> */
+#include <xeno/blk.h>
+#include <xeno/delay.h>
+/*  #include <xeno/completion.h> */
+/*  #include <asm/semaphore.h> */
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
/**
 *	aac_rx_intr	-	interrupt handler for i960 based adapters
 *	@irq: interrupt line (unused beyond the handler signature)
 *	@dev_id: the aac_dev this interrupt was registered with
 *	@regs: processor state (unused)
 *
 *	Reads the outbound interrupt status (OISR) and mask (OIMR)
 *	registers; if the interrupt is ours, dispatches on the outbound
 *	doorbell bits and acknowledges each event by writing it back to
 *	the outbound doorbell register (MUnit.ODR).
 */
static void aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
{
    struct aac_dev *dev = dev_id;
    unsigned long bellbits;
    u8 intstat, mask;
    intstat = rx_readb(dev, MUnit.OISR);
    /*
     * Read mask and invert because drawbridge is reversed.
     * This allows us to only service interrupts that have 
     * been enabled.
     */
    mask = ~(rx_readb(dev, MUnit.OIMR));
    /* Check to see if this is our interrupt.  If it isn't just return */
    
    if (intstat & mask) 
    {
	bellbits = rx_readl(dev, OutboundDoorbellReg);
	if (bellbits & DoorBellPrintfReady) {
	    /* Adapter printf request: message id comes from Mailbox 5;
	     * ack our doorbell, then signal the adapter it is done. */
	    aac_printf(dev, le32_to_cpu(rx_readl (dev, IndexRegs.Mailbox[5])));
	    rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
	    rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
	}
	else if (bellbits & DoorBellAdapterNormCmdReady) {
	    /* Adapter-to-host command pending on the normal queue. */
	    aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
	    rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
	}
	else if (bellbits & DoorBellAdapterNormRespReady) {
	    /* Response to one of our commands is ready. */
	    aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
	    rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
	}
	else if (bellbits & DoorBellAdapterNormCmdNotFull) {
	    rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
	}
	else if (bellbits & DoorBellAdapterNormRespNotFull) {
	    /* NOTE(review): this branch acknowledges CmdNotFull as well
	     * as RespNotFull -- presumably both conditions are cleared
	     * together by design; confirm against adapter firmware docs. */
	    rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
	    rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
	}
    }
}
+
+/**
+ *     aac_rx_enable_interrupt -       Enable event reporting
+ *     @dev: Adapter
+ *     @event: Event to enable
+ *
+ *     Enable event reporting from the i960 for a given event.
+ */
+static void aac_rx_enable_interrupt(struct aac_dev * dev, u32 event)
+{
+    switch (event) {
+       
+    case HostNormCmdQue:
+       dev->irq_mask &= ~(OUTBOUNDDOORBELL_1);
+       break;
+       
+    case HostNormRespQue:
+       dev->irq_mask &= ~(OUTBOUNDDOORBELL_2);
+       break;
+       
+    case AdapNormCmdNotFull:
+       dev->irq_mask &= ~(OUTBOUNDDOORBELL_3);
+       break;
+       
+    case AdapNormRespNotFull:
+       dev->irq_mask &= ~(OUTBOUNDDOORBELL_4);
+       break;
+    }
+}
+
+/**
+ *     aac_rx_disable_interrupt        -       Disable event reporting
+ *     @dev: Adapter
+ *     @event: Event to enable
+ *
+ *     Disable event reporting from the i960 for a given event.
+ */
+
+static void aac_rx_disable_interrupt(struct aac_dev *dev, u32 event)
+{
+    switch (event) {
+       
+    case HostNormCmdQue:
+       dev->irq_mask |= (OUTBOUNDDOORBELL_1);
+       break;
+       
+    case HostNormRespQue:
+       dev->irq_mask |= (OUTBOUNDDOORBELL_2);
+       break;
+       
+    case AdapNormCmdNotFull:
+       dev->irq_mask |= (OUTBOUNDDOORBELL_3);
+       break;
+       
+    case AdapNormRespNotFull:
+       dev->irq_mask |= (OUTBOUNDDOORBELL_4);
+       break;
+    }
+}
+
+/**
+ *     rx_sync_cmd     -       send a command and wait
+ *     @dev: Adapter
+ *     @command: Command to execute
+ *     @p1: first parameter
+ *     @ret: adapter status
+ *
+ *     This routine will send a synchronous comamnd to the adapter and wait 
+ *     for its completion.
+ */
+
+static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
+{
+    unsigned long start;
+    int ok;
+    /*
+     * Write the command into Mailbox 0
+     */
+    rx_writel(dev, InboundMailbox0, cpu_to_le32(command));
+    /*
+     * Write the parameters into Mailboxes 1 - 4
+     */
+    rx_writel(dev, InboundMailbox1, cpu_to_le32(p1));
+    rx_writel(dev, InboundMailbox2, 0);
+    rx_writel(dev, InboundMailbox3, 0);
+    rx_writel(dev, InboundMailbox4, 0);
+    /*
+     * Clear the synch command doorbell to start on a clean slate.
+     */
+    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+    /*
+     * Disable doorbell interrupts
+     */
+    rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) | 0x04);
+    /*
+     * Force the completion of the mask register write before issuing
+     * the interrupt.
+     */
+    rx_readb (dev, MUnit.OIMR);
+    /*
+     * Signal that there is a new synch command
+     */
+    rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
+    
+    ok = 0;
+    start = jiffies;
+    
+    /*
+     * Wait up to 30 seconds
+     */
+    while (time_before(jiffies, start+30*HZ)) 
+    {
+       /* Delay 5 microseconds to let Mon960 get info. */
+       udelay(5);      
+       /*
+        *      Mon960 will set doorbell0 bit when its completed the command.
+        */
+       if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
+           /*
+            *  Clear the doorbell.
+            */
+           rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+           ok = 1;
+           break;
+       }
+#if 0
+       /*
+        *      Yield the processor in case we are slow 
+        */
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       schedule_timeout(1);
+#else 
+       /* XXX SMH: not in xen we don't */
+       mdelay(50); 
+#endif
+       
+    }
+    if (ok != 1) {
+       /*
+        *      Restore interrupt mask even though we timed out
+        */
+       rx_writeb(dev, MUnit.OIMR, rx_readl(dev, MUnit.OIMR) & 0xfb);
+       return -ETIMEDOUT;
+    }
+    /*
+     * Pull the synch status from Mailbox 0.
+     */
+    *status = le32_to_cpu(rx_readl(dev, IndexRegs.Mailbox[0]));
+    /*
+     * Clear the synch command doorbell.
+     */
+    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+    /*
+     * Restore interrupt mask
+     */
+    rx_writeb(dev, MUnit.OIMR, rx_readl(dev, MUnit.OIMR) & 0xfb);
+    return 0;
+    
+}
+
+/**
+ *     aac_rx_interrupt_adapter        -       interrupt adapter
+ *     @dev: Adapter
+ *
+ *     Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_rx_interrupt_adapter(struct aac_dev *dev)
+{
+    u32 ret;
+    rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
+}
+
+/**
+ *     aac_rx_notify_adapter           -       send an event to the adapter
+ *     @dev: Adapter
+ *     @event: Event to send
+ *
+ *     Notify the i960 that something it probably cares about has
+ *     happened.
+ */
+
+static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
+{
+    switch (event) {
+       
+    case AdapNormCmdQue:
+       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
+       break;
+    case HostNormRespNotFull:
+       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
+       break;
+    case AdapNormRespQue:
+       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
+       break;
+    case HostNormCmdNotFull:
+       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
+       break;
+    case HostShutdown:
+//             rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
+       break;
+    case FastIo:
+       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
+       break;
+    case AdapPrintfDone:
+       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
+       break;
+    default:
+       BUG();
+       break;
+    }
+}
+
+/**
+ *     aac_rx_start_adapter            -       activate adapter
+ *     @dev:   Adapter
+ *
+ *     Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_rx_start_adapter(struct aac_dev *dev)
+{
+    u32 status;
+    struct aac_init *init;
+    
+    init = dev->init;
+    printk("aac_rx_start: dev is %p, init is %p\n", dev, init); 
+    init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
+    /*
+     * Tell the adapter we are back and up and running so it will scan
+     * its command queues and enable our interrupts
+     */
+    dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | 
+                    OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | 
+                    OUTBOUNDDOORBELL_4);
+    /*
+     * First clear out all interrupts.  Then enable the one's that we
+     * can handle.
+     */
+    rx_writeb(dev, MUnit.OIMR, 0xff);
+    rx_writel(dev, MUnit.ODR, 0xffffffff);
+//     rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
+    rx_writeb(dev, MUnit.OIMR, 0xfb);
+    
+    // We can only use a 32 bit address here
+    rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, 
+               (u32)(ulong)dev->init_pa, &status);
+}
+
/**
 *	aac_rx_init	-	initialize an i960 based AAC card
 *	@dev: device to configure
 *	@num: adapter number
 *
 *	Allocate and set up resources for the i960 based AAC variants. The 
 *	device_interface in the commregion will be allocated and linked 
 *	to the comm region.  Returns 0 on success, -1 on any failure.
 *
 *	NOTE(review): the error paths below return without iounmap()ing
 *	dev->regs.rx, and the aac_init_adapter() failure path also leaks
 *	the IRQ registration -- acceptable only if a failed init is fatal
 *	to the system; worth confirming.
 */

int aac_rx_init(struct aac_dev *dev, unsigned long num)
{
    unsigned long start;
    unsigned long status;
    int instance;
    const char * name;
    
    dev->devnum = num;
    instance = dev->id;
    name     = dev->name;
    
    dprintk((KERN_ERR "aac_rx_init called, num %ld, scsi host ptr = %p\n", 
	     num, (void *)(dev->scsi_host_ptr))); 
    
    dprintk((KERN_ERR "scsi_host_ptr->base is %p\n", 
	     (void *)dev->scsi_host_ptr->base)); 
    /*
     * Map in the registers from the adapter.
     */
    if((dev->regs.rx = (struct rx_registers *)
	ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
    {	
	printk(KERN_WARNING "aacraid: unable to map i960.\n" );
	return -1;
    }
    
//	dprintk((KERN_ERR "aac_rx_init: AAA\n")); 
    /*
     * Check to see if the board failed any self tests.
     */
    if (rx_readl(dev, IndexRegs.Mailbox[7]) & SELF_TEST_FAILED) {
	printk(KERN_ERR "%s%d: adapter self-test failed.\n", 
	       dev->name, instance);
	return -1;
    }
    
    
//	dprintk((KERN_ERR "aac_rx_init: BBB\n")); 
    /*
     * Check to see if the board panic'd while booting.
     */
    if (rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_PANIC) {
	printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", 
	       dev->name, instance);
	return -1;
    }
    start = jiffies;
    
//	dprintk((KERN_ERR "aac_rx_init: DDD\n")); 
    /*
     * Wait for the adapter to be up and running. Wait up to 3 minutes
     */
    while (!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 
    {
	if(time_after(jiffies, start+180*HZ))
	{
	    /* Adapter firmware never came up: report its init status
	     * (top 16 bits of Mailbox 7) and give up. */
	    status = rx_readl(dev, IndexRegs.Mailbox[7]) >> 16;
	    printk(KERN_ERR "%s%d: adapter kernel failed to start,"
		   "init status = %ld.\n", dev->name, 
		   instance, status);
	    return -1;
	}
// dprintk((KERN_ERR "aac_rx_init: XXX\n")); 
	
#if 0 
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(1);
#else
	/* XXX SMH: no sleeping for us (we're the xen idle task) */
	mdelay(50); 
#endif
	
    }
    
//	dprintk((KERN_ERR "aac_rx_init: ZZZ!\n")); 
    if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, 
		    SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev) < 0) 
    {
	printk(KERN_ERR "%s%d: Interrupt unavailable.\n", 
	       name, instance);
	return -1;
    }
    /*
     * Fill in the function dispatch table.
     */
    dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
    dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt;
    dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
    dev->a_ops.adapter_notify = aac_rx_notify_adapter;
    dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
    
    if (aac_init_adapter(dev) == NULL)
	return -1;
#if 0
    /*
     * Start any kernel threads needed
     */
    dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, 
				    dev, 0);
#else 
    /* XXX SMH: just put in a softirq handler instead... */
    open_softirq(SCSI_LOW_SOFTIRQ, aac_command_thread, dev); 
#endif

    /*
     * Tell the adapter that all is configured, and it can start
     * accepting requests
     */
    aac_rx_start_adapter(dev);
    return 0;
}
diff --git a/xen-2.4.16/drivers/scsi/aacraid/sa.c b/xen-2.4.16/drivers/scsi/aacraid/sa.c
new file mode 100644 (file)
index 0000000..edb5679
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  sa.c
+ *
+ * Abstract: Drawbridge specific support functions
+ *
+ */
+
+#include <xeno/config.h>
+#include <xeno/kernel.h>
+#include <xeno/init.h>
+#include <xeno/types.h>
+#include <xeno/sched.h>
+/*  #include <xeno/pci.h> */
+/*  #include <xeno/spinlock.h> */
+/*  #include <xeno/slab.h> */
+#include <xeno/blk.h>
+#include <xeno/delay.h>
+/*  #include <xeno/completion.h> */
+/*  #include <asm/semaphore.h> */
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
/**
 *	aac_sa_intr	-	interrupt handler for ARM based adapters
 *	@irq: interrupt line (unused beyond the handler signature)
 *	@dev_id: the aac_dev this interrupt was registered with
 *	@regs: processor state (unused)
 *
 *	Reads the primary doorbell status and mask registers; if the
 *	interrupt is ours, dispatches on the doorbell bits and clears
 *	each serviced bit via DoorbellClrReg_p.
 */
static void aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct aac_dev *dev = dev_id;
	unsigned short intstat, mask;

	intstat = sa_readw(dev, DoorbellReg_p);
	/*
	 *	Read mask and invert because drawbridge is reversed.
	 *	This allows us to only service interrupts that have been enabled.
	 */
	mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));

	/* Check to see if this is our interrupt.  If it isn't just return */

	if (intstat & mask) {
		if (intstat & PrintfReady) {
			/* Adapter printf request: message comes via Mailbox 5;
			 * clear our doorbell, then signal the adapter. */
			aac_printf(dev, le32_to_cpu(sa_readl(dev, Mailbox5)));
			sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
			sa_writew(dev, DoorbellReg_s, PrintfDone);
		} else if (intstat & DOORBELL_1) {	// dev -> Host Normal Command Ready
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
			sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
		} else if (intstat & DOORBELL_2) {	// dev -> Host Normal Response Ready
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
			sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
		} else if (intstat & DOORBELL_3) {	// dev -> Host Normal Command Not Full
			sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
		} else if (intstat & DOORBELL_4) {	// dev -> Host Normal Response Not Full
			sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
		}
	}
}
+
+/**
+ *     aac_sa_enable_interrupt -       enable an interrupt event
+ *     @dev: Which adapter to enable.
+ *     @event: Which adapter event.
+ *
+ *     This routine will enable the corresponding adapter event to cause an interrupt on 
+ *     the host.
+ */
+void aac_sa_enable_interrupt(struct aac_dev *dev, u32 event)
+{
+       switch (event) {
+
+       case HostNormCmdQue:
+               sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_1);
+               break;
+
+       case HostNormRespQue:
+               sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_2);
+               break;
+
+       case AdapNormCmdNotFull:
+               sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_3);
+               break;
+
+       case AdapNormRespNotFull:
+               sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_4);
+               break;
+       }
+}
+
+/**
+ *     aac_sa_disable_interrupt        -       disable an interrupt event
+ *     @dev: Which adapter to enable.
+ *     @event: Which adapter event.
+ *
+ *     This routine will enable the corresponding adapter event to cause an interrupt on 
+ *     the host.
+ */
+
+void aac_sa_disable_interrupt (struct aac_dev *dev, u32 event)
+{
+       switch (event) {
+
+       case HostNormCmdQue:
+               sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_1);
+               break;
+
+       case HostNormRespQue:
+               sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_2);
+               break;
+
+       case AdapNormCmdNotFull:
+               sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_3);
+               break;
+
+       case AdapNormRespNotFull:
+               sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_4);
+               break;
+       }
+}
+
+/**
+ *     aac_sa_notify_adapter           -       handle adapter notification
+ *     @dev:   Adapter that notification is for
+ *     @event: Event to notidy
+ *
+ *     Notify the adapter of an event
+ */
+void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
+{
+       switch (event) {
+
+       case AdapNormCmdQue:
+               sa_writew(dev, DoorbellReg_s,DOORBELL_1);
+               break;
+       case HostNormRespNotFull:
+               sa_writew(dev, DoorbellReg_s,DOORBELL_4);
+               break;
+       case AdapNormRespQue:
+               sa_writew(dev, DoorbellReg_s,DOORBELL_2);
+               break;
+       case HostNormCmdNotFull:
+               sa_writew(dev, DoorbellReg_s,DOORBELL_3);
+               break;
+       case HostShutdown:
+               //sa_sync_cmd(dev, HOST_CRASHING, 0, &ret);
+               break;
+       case FastIo:
+               sa_writew(dev, DoorbellReg_s,DOORBELL_6);
+               break;
+       case AdapPrintfDone:
+               sa_writew(dev, DoorbellReg_s,DOORBELL_5);
+               break;
+       default:
+               BUG();
+               break;
+       }
+}
+
+
+/**
+ *     sa_sync_cmd     -       send a command and wait
+ *     @dev: Adapter
+ *     @command: Command to execute
+ *     @p1: first parameter
+ *     @ret: adapter status
+ *
+ *     This routine will send a synchronous comamnd to the adapter and wait 
+ *     for its completion.
+ */
+
+static int sa_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *ret)
+{
+       unsigned long start;
+       int ok;
+       /*
+        *      Write the Command into Mailbox 0
+        */
+       sa_writel(dev, Mailbox0, cpu_to_le32(command));
+       /*
+        *      Write the parameters into Mailboxes 1 - 4
+        */
+       sa_writel(dev, Mailbox1, cpu_to_le32(p1));
+       sa_writel(dev, Mailbox2, 0);
+       sa_writel(dev, Mailbox3, 0);
+       sa_writel(dev, Mailbox4, 0);
+       /*
+        *      Clear the synch command doorbell to start on a clean slate.
+        */
+       sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+       /*
+        *      Signal that there is a new synch command
+        */
+       sa_writew(dev, DoorbellReg_s, DOORBELL_0);
+
+       ok = 0;
+       start = jiffies;
+
+       while(time_before(jiffies, start+30*HZ))
+       {
+               /*
+                *      Delay 5uS so that the monitor gets access
+                */
+               udelay(5);
+               /*
+                *      Mon110 will set doorbell0 bit when it has 
+                *      completed the command.
+                */
+               if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0)  {
+                       ok = 1;
+                       break;
+               }
+#if 0
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(1);
+#endif
+               mdelay(100); 
+
+       }
+
+       if (ok != 1)
+               return -ETIMEDOUT;
+       /*
+        *      Clear the synch command doorbell.
+        */
+       sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+       /*
+        *      Pull the synch status from Mailbox 0.
+        */
+       *ret = le32_to_cpu(sa_readl(dev, Mailbox0));
+       return 0;
+}
+
+/**
+ *     aac_sa_interrupt_adapter        -       interrupt an adapter
+ *     @dev: Which adapter to enable.
+ *
+ *     Breakpoint an adapter.
+ */
+static void aac_sa_interrupt_adapter (struct aac_dev *dev)
+{
+       u32 ret;
+       sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
+}
+
/**
 *	aac_sa_start_adapter		-	activate adapter
 *	@dev:	Adapter
 *
 *	Start up processing on an ARM based AAC adapter: stamp the init
 *	structure with the current time, program the interrupt masks and
 *	hand the adapter the physical address of the init structure.
 */

static void aac_sa_start_adapter(struct aac_dev *dev)
{
	u32 ret;
	struct aac_init *init;
	/*
	 * Fill in the remaining pieces of the init.
	 */
	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);

	dprintk(("INIT\n"));
	/*
	 * Tell the adapter we are back and up and running so it will scan its command
	 * queues and enable our interrupts
	 */
	dev->irq_mask = (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
	/*
	 *	First clear out all interrupts.  Then enable the one's that 
	 *	we can handle.
	 */
	dprintk(("MASK\n"));
	sa_writew(dev, SaDbCSR.PRISETIRQMASK, cpu_to_le16(0xffff));
	sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
	dprintk(("SYNCCMD\n"));
	/* We can only use a 32 bit address here; ret is ignored. */
	sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &ret);
}
+
+/**
+ *     aac_sa_init     -       initialize an ARM based AAC card
+ *     @dev: device to configure
+ *     @devnum: adapter number
+ *
+ *     Allocate and set up resources for the ARM based AAC variants. The 
+ *     device_interface in the commregion will be allocated and linked 
+ *     to the comm region.
+ */
+
+int aac_sa_init(struct aac_dev *dev, unsigned long devnum)
+{
+       unsigned long start;
+       unsigned long status;
+       int instance;
+       const char *name;
+
+       dev->devnum = devnum;
+
+       dprintk(("PREINST\n"));
+       instance = dev->id;
+       name     = dev->name;
+
+       /*
+        *      Map in the registers from the adapter.
+        */
+       dprintk(("PREMAP\n"));
+
+       if((dev->regs.sa = (struct sa_registers *)ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
+       {       
+               printk(KERN_WARNING "aacraid: unable to map ARM.\n" );
+               return -1;
+       }
+       /*
+        *      Check to see if the board failed any self tests.
+        */
+       if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
+               printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
+               return -1;
+       }
+       /*
+        *      Check to see if the board panic'd while booting.
+        */
+       if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
+               printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
+               return -1;
+       }
+       start = jiffies;
+       /*
+        *      Wait for the adapter to be up and running. Wait up to 3 minutes.
+        */
+       while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
+               if (time_after(start+180*HZ, jiffies)) {
+                       status = sa_readl(dev, Mailbox7) >> 16;
+                       printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %d.\n", name, instance, le32_to_cpu(status));
+                       return -1;
+               }
+#if 0
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(1);
+#endif
+               mdelay(100); 
+       }
+
+       dprintk(("ATIRQ\n"));
+       if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) {
+               printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
+               return -1;
+       }
+
+       /*
+        *      Fill in the function dispatch table.
+        */
+
+       dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+       dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+       dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+       dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+       dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+
+       dprintk(("FUNCDONE\n"));
+
+       if(aac_init_adapter(dev) == NULL)
+               return -1;
+
+       dprintk(("NEWADAPTDONE\n"));
+#if 0
+       /*
+        *      Start any kernel threads needed
+        */
+       dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
+#endif
+
+       /*
+        *      Tell the adapter that all is configure, and it can start 
+        *      accepting requests
+        */
+       dprintk(("STARTING\n"));
+       aac_sa_start_adapter(dev);
+       dprintk(("STARTED\n"));
+       return 0;
+}
+
diff --git a/xen-2.4.16/drivers/scsi/constants.c b/xen-2.4.16/drivers/scsi/constants.c
new file mode 100644 (file)
index 0000000..aea16f7
--- /dev/null
@@ -0,0 +1,1005 @@
+/* 
+ * ASCII values for a number of symbolic constants, printing functions,
+ * etc.
+ * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422)
+ *
+ */
+
+#define __NO_VERSION__
+#include <xeno/module.h>
+
+#include <xeno/config.h>
+#include <xeno/blk.h>
+/*#include <linux/kernel.h> */
+#include "scsi.h"
+#include "hosts.h"
+
+#define CONST_COMMAND   0x01
+#define CONST_STATUS    0x02
+#define CONST_SENSE     0x04
+#define CONST_XSENSE    0x08
+#define CONST_CMND      0x10
+#define CONST_MSG       0x20
+#define CONST_HOST     0x40
+#define CONST_DRIVER   0x80
+
+static const char unknown[] = "UNKNOWN";
+
+#ifdef CONFIG_SCSI_CONSTANTS
+#ifdef CONSTANTS
+#undef CONSTANTS
+#endif
+#define CONSTANTS (CONST_COMMAND | CONST_STATUS | CONST_SENSE | CONST_XSENSE \
+                  | CONST_CMND | CONST_MSG | CONST_HOST | CONST_DRIVER)
+#endif
+
+#if (CONSTANTS & CONST_COMMAND)
+static const char * group_0_commands[] = {   /* group 0 (6-byte CDBs), opcodes 0x00-0x1f */
+/* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense",
+/* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks",
+/* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown,
+/* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry",  
+/* 13-16 */ "Verify", "Recover Buffered Data", "Mode Select", "Reserve",
+/* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit",
+/* 1c-1d */ "Receive Diagnostic", "Send Diagnostic", 
+/* 1e-1f */ "Prevent/Allow Medium Removal", unknown,
+};
+
+
+static const char *group_1_commands[] = {   /* group 1 (10-byte CDBs), opcodes 0x20-0x3f; 32 entries */
+/* 20-22 */  unknown, unknown, unknown,
+/* 23-28 */ unknown, "Define window parameters", "Read Capacity", 
+            unknown, unknown, "Read (10)", 
+/* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase", 
+            "Read updated block", 
+/* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal", 
+/* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position", 
+/* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data", 
+/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", 
+            "Read Buffer", 
+/* 3d-3f */ "Update Block", "Read Long",  "Write Long",
+};
+
+
+static const char *group_2_commands[] = {   /* group 2 (10-byte CDBs), opcodes 0x40-0x5f; print_opcode indexes with (opcode & 0x1f), so 32 entries are required */
+/* 40-41 */ "Change Definition", "Write Same", 
+/* 42-48 */ "Read sub-channel", "Read TOC", "Read header", 
+            "Play audio (10)", unknown, "Play audio msf",
+            "Play audio track/index", 
+/* 49-4f */ "Play track relative (10)", unknown, "Pause/resume", 
+            "Log Select", "Log Sense", unknown, unknown,
+/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)",
+/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown,
+/* 5c-5f */ unknown, unknown, unknown, unknown,
+};
+
+
+/* The following are 16 byte commands in group 4 */
+static const char *group_4_commands[] = {   /* opcodes 0x80-0x9f; 32 entries */
+/* 80-84 */ unknown, unknown, unknown, unknown, unknown,
+/* 85-89 */ "Memory Export In (16)", unknown, unknown, unknown,
+            "Memory Export Out (16)",
+/* 8a-8f */ unknown, unknown, unknown, unknown, unknown, unknown,
+/* 90-94 */ unknown, unknown, unknown, unknown, unknown,
+/* 95-99 */ unknown, unknown, unknown, unknown, unknown,
+/* 9a-9f */ unknown, unknown, unknown, unknown, unknown, unknown,
+};
+
+
+/* The following are 12 byte commands in group 5 */
+static const char *group_5_commands[] = {   /* opcodes 0xa0-0xbf; 32 entries */
+/* a0-a5 */ unknown, unknown, unknown, unknown, unknown,
+            "Move medium/play audio(12)",
+/* a6-a9 */ "Exchange medium", unknown, "Read(12)", "Play track relative(12)", 
+/* aa-ae */ "Write(12)", unknown, "Erase(12)", unknown, 
+            "Write and verify(12)", 
+/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)",
+/* b2-b4 */ "Search data low(12)", "Set limits(12)", unknown,
+/* b5-b6 */ "Request volume element address", "Send volume tag",
+/* b7-b9 */ "Read defect data(12)", "Read element status", unknown,
+/* ba-bf */ unknown, unknown, unknown, unknown, unknown, unknown,
+};
+
+
+
+#define group(opcode) (((opcode) >> 5) & 7)
+
+#define RESERVED_GROUP  0
+#define VENDOR_GROUP    1
+
+static const char **commands[] = {   /* per-group name tables, indexed by group(opcode) */
+    group_0_commands, group_1_commands, group_2_commands, 
+    (const char **) RESERVED_GROUP, group_4_commands,    /* group 3 is reserved: small-int sentinel, not a real pointer */
+    group_5_commands, (const char **) VENDOR_GROUP,    /* groups 6-7 are vendor specific */
+    (const char **) VENDOR_GROUP
+};
+
+static const char reserved[] = "RESERVED";
+static const char vendor[] = "VENDOR SPECIFIC";
+
+static void print_opcode(int opcode) {   /* print the symbolic name (or hex) of a SCSI opcode */
+    const char **table = commands[ group(opcode) ];   /* top 3 opcode bits select the group table */
+    switch ((unsigned long) table) {   /* sentinel values 0/1 mark reserved/vendor groups */
+    case RESERVED_GROUP:
+       printk("%s(0x%02x) ", reserved, opcode); 
+       break;
+    case VENDOR_GROUP:
+       printk("%s(0x%02x) ", vendor, opcode); 
+       break;
+    default:
+       if (table[opcode & 0x1f] != unknown)   /* low 5 bits index within the 32-entry group table */
+           printk("%s ",table[opcode & 0x1f]);
+       else
+           printk("%s(0x%02x) ", unknown, opcode);
+       break;
+    }
+}
+#else /* CONST & CONST_COMMAND */
+static void print_opcode(int opcode) {   /* fallback when name tables are compiled out: hex only */
+    printk("0x%02x ", opcode);
+}
+#endif  
+
+void print_command (unsigned char *command) {   /* dump a CDB: symbolic opcode, then remaining bytes in hex */
+    int i,s;
+    print_opcode(command[0]);
+    for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)    /* COMMAND_SIZE derives CDB length from the opcode's group */
+       printk("%02x ", command[i]);
+    printk("\n");
+}
+
+#if (CONSTANTS & CONST_STATUS)
+static const char * statuses[] = {   /* status names indexed by (status >> 1) & 0x1f — see print_status() */
+/* 0-4 */ "Good", "Check Condition", "Condition Met", unknown, "Busy", 
+/* 5-9 */ unknown, unknown, unknown, "Intermediate", unknown, 
+/* a-c */ "Intermediate-Condition Met", unknown, "Reservation Conflict",
+/* d-10 */ unknown, unknown, unknown, unknown,
+/* 11-14 */ "Command Terminated", unknown, unknown, "Queue Full",
+/* 15-1a */ unknown, unknown, unknown, unknown, unknown, unknown,
+/* 1b-1f */ unknown, unknown, unknown, unknown, unknown,
+};
+#endif
+
+void print_status (int status) {   /* print a SCSI status byte; the code lives in bits 1-5 */
+    status = (status >> 1) & 0x1f;
+#if (CONSTANTS & CONST_STATUS)
+    printk("%s ",statuses[status]);
+#else
+    printk("0x%0x ", status); 
+#endif 
+}
+
+#if (CONSTANTS & CONST_XSENSE)
+#define D 0x0001  /* DIRECT ACCESS DEVICE (disk) */
+#define T 0x0002  /* SEQUENTIAL ACCESS DEVICE (tape) */
+#define L 0x0004  /* PRINTER DEVICE */
+#define P 0x0008  /* PROCESSOR DEVICE */
+#define W 0x0010  /* WRITE ONCE READ MULTIPLE DEVICE */
+#define R 0x0020  /* READ ONLY (CD-ROM) DEVICE */
+#define S 0x0040  /* SCANNER DEVICE */
+#define O 0x0080  /* OPTICAL MEMORY DEVICE */
+#define M 0x0100  /* MEDIA CHANGER DEVICE */
+#define C 0x0200  /* COMMUNICATION DEVICE */
+#define A 0x0400  /* ARRAY STORAGE */
+#define E 0x0800  /* ENCLOSURE SERVICES DEVICE */
+#define B 0x1000  /* SIMPLIFIED DIRECT ACCESS DEVICE */
+#define K 0x2000  /* OPTICAL CARD READER/WRITER DEVICE */
+
+struct error_info{   /* exact (ASC, ASCQ) sense-code match */
+    unsigned char code1, code2;   /* additional sense code / qualifier */
+    unsigned short int devices;   /* bitmask of device types (D, T, L, ...) the code applies to */
+    const char * text;
+};
+
+struct error_info2{   /* ASC match with a range of ASCQ values */
+    unsigned char code1, code2_min, code2_max;   /* ASC plus inclusive ASCQ range */
+    unsigned short int devices;   /* device-type bitmask, as above */
+    const char * text;
+};
+
+static struct error_info2 additional2[] =   /* range entries; each text contains %x for the ASCQ value */
+{
+  {0x40,0x00,0x7f,D,"Ram failure (%x)"},
+  {0x40,0x80,0xff,D|T|L|P|W|R|S|O|M|C,"Diagnostic failure on component (%x)"},
+  {0x41,0x00,0xff,D,"Data path failure (%x)"},
+  {0x42,0x00,0xff,D,"Power-on or self-test failure (%x)"},
+  {0, 0, 0, 0, NULL}   /* terminator */
+};
+
+static struct error_info additional[] =
+{
+  {0x00,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"No additional sense information"},
+  {0x00,0x01,T,"Filemark detected"},
+  {0x00,0x02,T|S,"End-of-partition/medium detected"},
+  {0x00,0x03,T,"Setmark detected"},
+  {0x00,0x04,T|S,"Beginning-of-partition/medium detected"},
+  {0x00,0x05,T|L|S,"End-of-data detected"},
+  {0x00,0x06,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"I/O process terminated"},
+  {0x00,0x11,R,"Audio play operation in progress"},
+  {0x00,0x12,R,"Audio play operation paused"},
+  {0x00,0x13,R,"Audio play operation successfully completed"},
+  {0x00,0x14,R,"Audio play operation stopped due to error"},
+  {0x00,0x15,R,"No current audio status to return"},
+  {0x00,0x16,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Operation in progress"},
+  {0x00,0x17,D|T|L|W|R|S|O|M|A|E|B|K,"Cleaning requested"},
+  {0x01,0x00,D|W|O|B|K,"No index/sector signal"},
+  {0x02,0x00,D|W|R|O|M|B|K,"No seek complete"},
+  {0x03,0x00,D|T|L|W|S|O|B|K,"Peripheral device write fault"},
+  {0x03,0x01,T,"No write current"},
+  {0x03,0x02,T,"Excessive write errors"},
+  {0x04,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit not ready,cause not reportable"},
+  {0x04,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit is in process of becoming ready"},
+  {0x04,0x02,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit not ready,initializing cmd. required"},
+  {0x04,0x03,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit not ready,manual intervention required"},
+  {0x04,0x04,D|T|L|R|O|B,"Logical unit not ready,format in progress"},
+  {0x04,0x05,D|T|W|O|M|C|A|B|K,"Logical unit not ready,rebuild in progress"},
+  {0x04,0x06,D|T|W|O|M|C|A|B|K,"Logical unit not ready,recalculation in progress"},
+  {0x04,0x07,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit not ready,operation in progress"},
+  {0x04,0x08,R,"Logical unit not ready,long write in progress"},
+  {0x04,0x09,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit not ready,self-test in progress"},
+  {0x05,0x00,D|T|L|W|R|S|O|M|C|A|E|B|K,"Logical unit does not respond to selection"},
+  {0x06,0x00,D|W|R|O|M|B|K,"No reference position found"},
+  {0x07,0x00,D|T|L|W|R|S|O|M|B|K,"Multiple peripheral devices selected"},
+  {0x08,0x00,D|T|L|W|R|S|O|M|C|A|E|B|K,"Logical unit communication failure"},
+  {0x08,0x01,D|T|L|W|R|S|O|M|C|A|E|B|K,"Logical unit communication time-out"},
+  {0x08,0x02,D|T|L|W|R|S|O|M|C|A|E|B|K,"Logical unit communication parity error"},
+  {0x08,0x03,D|T|R|O|M|B|K,"Logical unit communication CRC error (Ultra-DMA/32)"},
+  {0x08,0x04,D|T|L|P|W|R|S|O|C|K,"Unreachable copy target"},
+  {0x09,0x00,D|T|W|R|O|B,"Track following error"},
+  {0x09,0x01,W|R|O|K,"Tracking servo failure"},
+  {0x09,0x02,W|R|O|K,"Focus servo failure"},
+  {0x09,0x03,W|R|O,"Spindle servo failure"},
+  {0x09,0x04,D|T|W|R|O|B,"Head select fault"},
+  {0x0A,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Error log overflow"},
+  {0x0B,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Warning"},
+  {0x0B,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Warning - specified temperature exceeded"},
+  {0x0B,0x02,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Warning - enclosure degraded"},
+  {0x0C,0x00,T|R|S,"Write error"},
+  {0x0C,0x01,K,"Write error - recovered with auto reallocation"},
+  {0x0C,0x02,D|W|O|B|K,"Write error - auto reallocation failed"},
+  {0x0C,0x03,D|W|O|B|K,"Write error - recommend reassignment"},
+  {0x0C,0x04,D|T|W|O|B,"Compression check miscompare error"},
+  {0x0C,0x05,D|T|W|O|B,"Data expansion occurred during compression"},
+  {0x0C,0x06,D|T|W|O|B,"Block not compressible"},
+  {0x0C,0x07,R,"Write error - recovery needed"},
+  {0x0C,0x08,R,"Write error - recovery failed"},
+  {0x0C,0x09,R,"Write error - loss of streaming"},
+  {0x0C,0x0A,R,"Write error - padding blocks added"},
+  {0x10,0x00,D|W|O|B|K,"Id CRC or ECC error"},
+  {0x11,0x00,D|T|W|R|S|O|B|K,"Unrecovered read error"},
+  {0x11,0x01,D|T|W|R|S|O|B|K,"Read retries exhausted"},
+  {0x11,0x02,D|T|W|R|S|O|B|K,"Error too long to correct"},
+  {0x11,0x03,D|T|W|S|O|B|K,"Multiple read errors"},
+  {0x11,0x04,D|W|O|B|K,"Unrecovered read error - auto reallocate failed"},
+  {0x11,0x05,W|R|O|B,"L-EC uncorrectable error"},
+  {0x11,0x06,W|R|O|B,"CIRC unrecovered error"},
+  {0x11,0x07,W|O|B,"Data re-synchronization error"},
+  {0x11,0x08,T,"Incomplete block read"},
+  {0x11,0x09,T,"No gap found"},
+  {0x11,0x0A,D|T|O|B|K,"Miscorrected error"},
+  {0x11,0x0B,D|W|O|B|K,"Unrecovered read error - recommend reassignment"},
+  {0x11,0x0C,D|W|O|B|K,"Unrecovered read error - recommend rewrite the data"},
+  {0x11,0x0D,D|T|W|R|O|B,"De-compression CRC error"},
+  {0x11,0x0E,D|T|W|R|O|B,"Cannot decompress using declared algorithm"},
+  {0x11,0x0F,R,"Error reading UPC/EAN number"},
+  {0x11,0x10,R,"Error reading ISRC number"},
+  {0x11,0x11,R,"Read error - loss of streaming"},
+  {0x12,0x00,D|W|O|B|K,"Address mark not found for id field"},
+  {0x13,0x00,D|W|O|B|K,"Address mark not found for data field"},
+  {0x14,0x00,D|T|L|W|R|S|O|B|K,"Recorded entity not found"},
+  {0x14,0x01,D|T|W|R|O|B|K,"Record not found"},
+  {0x14,0x02,T,"Filemark or setmark not found"},
+  {0x14,0x03,T,"End-of-data not found"},
+  {0x14,0x04,T,"Block sequence error"},
+  {0x14,0x05,D|T|W|O|B|K,"Record not found - recommend reassignment"},
+  {0x14,0x06,D|T|W|O|B|K,"Record not found - data auto-reallocated"},
+  {0x15,0x00,D|T|L|W|R|S|O|M|B|K,"Random positioning error"},
+  {0x15,0x01,D|T|L|W|R|S|O|M|B|K,"Mechanical positioning error"},
+  {0x15,0x02,D|T|W|R|O|B|K,"Positioning error detected by read of medium"},
+  {0x16,0x00,D|W|O|B|K,"Data synchronization mark error"},
+  {0x16,0x01,D|W|O|B|K,"Data sync error - data rewritten"},
+  {0x16,0x02,D|W|O|B|K,"Data sync error - recommend rewrite"},
+  {0x16,0x03,D|W|O|B|K,"Data sync error - data auto-reallocated"},
+  {0x16,0x04,D|W|O|B|K,"Data sync error - recommend reassignment"},
+  {0x17,0x00,D|T|W|R|S|O|B|K,"Recovered data with no error correction applied"},
+  {0x17,0x01,D|T|W|R|S|O|B|K,"Recovered data with retries"},
+  {0x17,0x02,D|T|W|R|O|B|K,"Recovered data with positive head offset"},
+  {0x17,0x03,D|T|W|R|O|B|K,"Recovered data with negative head offset"},
+  {0x17,0x04,W|R|O|B,"Recovered data with retries and/or circ applied"},
+  {0x17,0x05,D|W|R|O|B|K,"Recovered data using previous sector id"},
+  {0x17,0x06,D|W|O|B|K,"Recovered data without ecc - data auto-reallocated"},
+  {0x17,0x07,D|W|R|O|B|K,"Recovered data without ecc - recommend reassignment"},
+  {0x17,0x08,D|W|R|O|B|K,"Recovered data without ecc - recommend rewrite"},
+  {0x17,0x09,D|W|R|O|B|K,"Recovered data without ecc - data rewritten"},
+  {0x18,0x00,D|T|W|R|O|B|K,"Recovered data with error correction applied"},
+  {0x18,0x01,D|W|R|O|B|K,"Recovered data with error corr. & retries applied"},
+  {0x18,0x02,D|W|R|O|B|K,"Recovered data - data auto-reallocated"},
+  {0x18,0x03,R,"Recovered data with CIRC"},
+  {0x18,0x04,R,"Recovered data with L-EC"},
+  {0x18,0x05,D|W|R|O|B|K,"Recovered data - recommend reassignment"},
+  {0x18,0x06,D|W|R|O|B|K,"Recovered data - recommend rewrite"},
+  {0x18,0x07,D|W|O|B|K,"Recovered data with ecc - data rewritten"},
+  {0x19,0x00,D|O|K,"Defect list error"},
+  {0x19,0x01,D|O|K,"Defect list not available"},
+  {0x19,0x02,D|O|K,"Defect list error in primary list"},
+  {0x19,0x03,D|O|K,"Defect list error in grown list"},
+  {0x1A,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Parameter list length error"},
+  {0x1B,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Synchronous data transfer error"},
+  {0x1C,0x00,D|O|B|K,"Defect list not found"},
+  {0x1C,0x01,D|O|B|K,"Primary defect list not found"},
+  {0x1C,0x02,D|O|B|K,"Grown defect list not found"},
+  {0x1D,0x00,D|T|W|R|O|B|K,"Miscompare during verify operation"},
+  {0x1E,0x00,D|W|O|B|K,"Recovered id with ecc correction"},
+  {0x1F,0x00,D|O|K,"Partial defect list transfer"},
+  {0x20,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Invalid command operation code"},
+  {0x21,0x00,D|T|W|R|O|M|B|K,"Logical block address out of range"},
+  {0x21,0x01,D|T|W|R|O|M|B|K,"Invalid element address"},
+  {0x22,0x00,D,"Illegal function (use 20 00,24 00,or 26 00)"},
+  {0x24,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Invalid field in cdb"},
+  {0x24,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"CDB decryption error"},
+  {0x25,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit not supported"},
+  {0x26,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Invalid field in parameter list"},
+  {0x26,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Parameter not supported"},
+  {0x26,0x02,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Parameter value invalid"},
+  {0x26,0x03,D|T|L|P|W|R|S|O|M|C|A|E|K,"Threshold parameters not supported"},
+  {0x26,0x04,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Invalid release of persistent reservation"},
+  {0x26,0x05,D|T|L|P|W|R|S|O|M|C|A|B|K,"Data decryption error"},
+  {0x26,0x06,D|T|L|P|W|R|S|O|C|K,"Too many target descriptors"},
+  {0x26,0x07,D|T|L|P|W|R|S|O|C|K,"Unsupported target descriptor type code"},
+  {0x26,0x08,D|T|L|P|W|R|S|O|C|K,"Too many segment descriptors"},
+  {0x26,0x09,D|T|L|P|W|R|S|O|C|K,"Unsupported segment descriptor type code"},
+  {0x26,0x0A,D|T|L|P|W|R|S|O|C|K,"Unexpected inexact segment"},
+  {0x26,0x0B,D|T|L|P|W|R|S|O|C|K,"Inline data length exceeded"},
+  {0x26,0x0C,D|T|L|P|W|R|S|O|C|K,"Invalid operation for copy source or destination"},
+  {0x26,0x0D,D|T|L|P|W|R|S|O|C|K,"Copy segment granularity violation"},
+  {0x27,0x00,D|T|W|R|O|B|K,"Write protected"},
+  {0x27,0x01,D|T|W|R|O|B|K,"Hardware write protected"},
+  {0x27,0x02,D|T|W|R|O|B|K,"Logical unit software write protected"},
+  {0x27,0x03,T|R,"Associated write protect"},
+  {0x27,0x04,T|R,"Persistent write protect"},
+  {0x27,0x05,T|R,"Permanent write protect"},
+  {0x28,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Not ready to ready change,medium may have changed"},
+  {0x28,0x01,D|T|W|R|O|M|B,"Import or export element accessed"},
+  {0x29,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Power on,reset,or bus device reset occurred"},
+  {0x29,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Power on occurred"},
+  {0x29,0x02,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Scsi bus reset occurred"},
+  {0x29,0x03,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Bus device reset function occurred"},
+  {0x29,0x04,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Device internal reset"},
+  {0x29,0x05,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Transceiver mode changed to single-ended"},
+  {0x29,0x06,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Transceiver mode changed to lvd"},
+  {0x2A,0x00,D|T|L|W|R|S|O|M|C|A|E|B|K,"Parameters changed"},
+  {0x2A,0x01,D|T|L|W|R|S|O|M|C|A|E|B|K,"Mode parameters changed"},
+  {0x2A,0x02,D|T|L|W|R|S|O|M|C|A|E|K,"Log parameters changed"},
+  {0x2A,0x03,D|T|L|P|W|R|S|O|M|C|A|E|K,"Reservations preempted"},
+  {0x2A,0x04,D|T|L|P|W|R|S|O|M|C|A|E,"Reservations released"},
+  {0x2A,0x05,D|T|L|P|W|R|S|O|M|C|A|E,"Registrations preempted"},
+  {0x2B,0x00,D|T|L|P|W|R|S|O|C|K,"Copy cannot execute since host cannot disconnect"},
+  {0x2C,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Command sequence error"},
+  {0x2C,0x01,S,"Too many windows specified"},
+  {0x2C,0x02,S,"Invalid combination of windows specified"},
+  {0x2C,0x03,R,"Current program area is not empty"},
+  {0x2C,0x04,R,"Current program area is empty"},
+  {0x2C,0x05,B,"Illegal power condition request"},
+  {0x2D,0x00,T,"Overwrite error on update in place"},
+  {0x2F,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Commands cleared by another initiator"},
+  {0x30,0x00,D|T|W|R|O|M|B|K,"Incompatible medium installed"},
+  {0x30,0x01,D|T|W|R|O|B|K,"Cannot read medium - unknown format"},
+  {0x30,0x02,D|T|W|R|O|B|K,"Cannot read medium - incompatible format"},
+  {0x30,0x03,D|T|R|K,"Cleaning cartridge installed"},
+  {0x30,0x04,D|T|W|R|O|B|K,"Cannot write medium - unknown format"},
+  {0x30,0x05,D|T|W|R|O|B|K,"Cannot write medium - incompatible format"},
+  {0x30,0x06,D|T|W|R|O|B,"Cannot format medium - incompatible medium"},
+  {0x30,0x07,D|T|L|W|R|S|O|M|A|E|B|K,"Cleaning failure"},
+  {0x30,0x08,R,"Cannot write - application code mismatch"},
+  {0x30,0x09,R,"Current session not fixated for append"},
+  {0x31,0x00,D|T|W|R|O|B|K,"Medium format corrupted"},
+  {0x31,0x01,D|L|R|O|B,"Format command failed"},
+  {0x32,0x00,D|W|O|B|K,"No defect spare location available"},
+  {0x32,0x01,D|W|O|B|K,"Defect list update failure"},
+  {0x33,0x00,T,"Tape length error"},
+  {0x34,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Enclosure failure"},
+  {0x35,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Enclosure services failure"},
+  {0x35,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Unsupported enclosure function"},
+  {0x35,0x02,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Enclosure services unavailable"},
+  {0x35,0x03,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Enclosure services transfer failure"},
+  {0x35,0x04,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Enclosure services transfer refused"},
+  {0x36,0x00,L,"Ribbon,ink,or toner failure"},
+  {0x37,0x00,D|T|L|W|R|S|O|M|C|A|E|B|K,"Rounded parameter"},
+  {0x38,0x00,B,"Event status notification"},
+  {0x38,0x02,B,"Esn - power management class event"},
+  {0x38,0x04,B,"Esn - media class event"},
+  {0x38,0x06,B,"Esn - device busy class event"},
+  {0x39,0x00,D|T|L|W|R|S|O|M|C|A|E|K,"Saving parameters not supported"},
+  {0x3A,0x00,D|T|L|W|R|S|O|M|B|K,"Medium not present"},
+  {0x3A,0x01,D|T|W|R|O|M|B|K,"Medium not present - tray closed"},
+  {0x3A,0x02,D|T|W|R|O|M|B|K,"Medium not present - tray open"},
+  {0x3A,0x03,D|T|W|R|O|M|B,"Medium not present - loadable"},
+  {0x3A,0x04,D|T|W|R|O|M|B,"Medium not present - medium auxiliary memory accessible"},
+  {0x3B,0x00,T|L,"Sequential positioning error"},
+  {0x3B,0x01,T,"Tape position error at beginning-of-medium"},
+  {0x3B,0x02,T,"Tape position error at end-of-medium"},
+  {0x3B,0x03,L,"Tape or electronic vertical forms unit not ready"},
+  {0x3B,0x04,L,"Slew failure"},
+  {0x3B,0x05,L,"Paper jam"},
+  {0x3B,0x06,L,"Failed to sense top-of-form"},
+  {0x3B,0x07,L,"Failed to sense bottom-of-form"},
+  {0x3B,0x08,T,"Reposition error"},
+  {0x3B,0x09,S,"Read past end of medium"},
+  {0x3B,0x0A,S,"Read past beginning of medium"},
+  {0x3B,0x0B,S,"Position past end of medium"},
+  {0x3B,0x0C,T|S,"Position past beginning of medium"},
+  {0x3B,0x0D,D|T|W|R|O|M|B|K,"Medium destination element full"},
+  {0x3B,0x0E,D|T|W|R|O|M|B|K,"Medium source element empty"},
+  {0x3B,0x0F,R,"End of medium reached"},
+  {0x3B,0x11,D|T|W|R|O|M|B|K,"Medium magazine not accessible"},
+  {0x3B,0x12,D|T|W|R|O|M|B|K,"Medium magazine removed"},
+  {0x3B,0x13,D|T|W|R|O|M|B|K,"Medium magazine inserted"},
+  {0x3B,0x14,D|T|W|R|O|M|B|K,"Medium magazine locked"},
+  {0x3B,0x15,D|T|W|R|O|M|B|K,"Medium magazine unlocked"},
+  {0x3B,0x16,R,"Mechanical positioning or changer error"},
+  {0x3D,0x00,D|T|L|P|W|R|S|O|M|C|A|E|K,"Invalid bits in identify message"},
+  {0x3E,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit has not self-configured yet"},
+  {0x3E,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit failure"},
+  {0x3E,0x02,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Timeout on logical unit"},
+  {0x3E,0x03,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit failed self-test"},
+  {0x3E,0x04,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit unable to update self-test log"},
+  {0x3F,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Target operating conditions have changed"},
+  {0x3F,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Microcode has been changed"},
+  {0x3F,0x02,D|T|L|P|W|R|S|O|M|C|B|K,"Changed operating definition"},
+  {0x3F,0x03,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Inquiry data has changed"},
+  {0x3F,0x04,D|T|W|R|O|M|C|A|E|B|K,"Component device attached"},
+  {0x3F,0x05,D|T|W|R|O|M|C|A|E|B|K,"Device identifier changed"},
+  {0x3F,0x06,D|T|W|R|O|M|C|A|E|B,"Redundancy group created or modified"},
+  {0x3F,0x07,D|T|W|R|O|M|C|A|E|B,"Redundancy group deleted"},
+  {0x3F,0x08,D|T|W|R|O|M|C|A|E|B,"Spare created or modified"},
+  {0x3F,0x09,D|T|W|R|O|M|C|A|E|B,"Spare deleted"},
+  {0x3F,0x0A,D|T|W|R|O|M|C|A|E|B|K,"Volume set created or modified"},
+  {0x3F,0x0B,D|T|W|R|O|M|C|A|E|B|K,"Volume set deleted"},
+  {0x3F,0x0C,D|T|W|R|O|M|C|A|E|B|K,"Volume set deassigned"},
+  {0x3F,0x0D,D|T|W|R|O|M|C|A|E|B|K,"Volume set reassigned"},
+  {0x3F,0x0E,D|T|L|P|W|R|S|O|M|C|A|E,"Reported luns data has changed"},
+  {0x3F,0x10,D|T|W|R|O|M|B,"Medium loadable"},
+  {0x3F,0x11,D|T|W|R|O|M|B,"Medium auxiliary memory accessible"},
+  {0x40,0x00,D,"Ram failure (should use 40 nn)"},
+  /*
+   * FIXME(eric) - need a way to represent wildcards here.
+   */
+  {0x40,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Diagnostic failure on component nn (80h-ffh)"},
+  {0x41,0x00,D,"Data path failure (should use 40 nn)"},
+  {0x42,0x00,D,"Power-on or self-test failure (should use 40 nn)"},
+  {0x43,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Message error"},
+  {0x44,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Internal target failure"},
+  {0x45,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Select or reselect failure"},
+  {0x46,0x00,D|T|L|P|W|R|S|O|M|C|B|K,"Unsuccessful soft reset"},
+  {0x47,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Scsi parity error"},
+  {0x47,0x01,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Data phase CRC error detected"},
+  {0x47,0x02,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Scsi parity error detected during st data phase"},
+  {0x47,0x03,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Information unit CRC error detected"},
+  {0x47,0x04,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Asynchronous information protection error detected"},
+  {0x48,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Initiator detected error message received"},
+  {0x49,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Invalid message error"},
+  {0x4A,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Command phase error"},
+  {0x4B,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Data phase error"},
+  {0x4C,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Logical unit failed self-configuration"},
+  /*
+   * FIXME(eric) - need a way to represent wildcards here.
+   */
+  {0x4D,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Tagged overlapped commands (nn = queue tag)"},
+  {0x4E,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Overlapped commands attempted"},
+  {0x50,0x00,T,"Write append error"},
+  {0x50,0x01,T,"Write append position error"},
+  {0x50,0x02,T,"Position error related to timing"},
+  {0x51,0x00,T|R|O,"Erase failure"},
+  {0x52,0x00,T,"Cartridge fault"},
+  {0x53,0x00,D|T|L|W|R|S|O|M|B|K,"Media load or eject failed"},
+  {0x53,0x01,T,"Unload tape failure"},
+  {0x53,0x02,D|T|W|R|O|M|B|K,"Medium removal prevented"},
+  {0x54,0x00,P,"Scsi to host system interface failure"},
+  {0x55,0x00,P,"System resource failure"},
+  {0x55,0x01,D|O|B|K,"System buffer full"},
+  {0x55,0x02,D|T|L|P|W|R|S|O|M|A|E|K,"Insufficient reservation resources"},
+  {0x55,0x03,D|T|L|P|W|R|S|O|M|C|A|E,"Insufficient resources"},
+  {0x55,0x04,D|T|L|P|W|R|S|O|M|A|E,"Insufficient registration resources"},
+  {0x57,0x00,R,"Unable to recover table-of-contents"},
+  {0x58,0x00,O,"Generation does not exist"},
+  {0x59,0x00,O,"Updated block read"},
+  {0x5A,0x00,D|T|L|P|W|R|S|O|M|B|K,"Operator request or state change input"},
+  {0x5A,0x01,D|T|W|R|O|M|B|K,"Operator medium removal request"},
+  {0x5A,0x02,D|T|W|R|O|A|B|K,"Operator selected write protect"},
+  {0x5A,0x03,D|T|W|R|O|A|B|K,"Operator selected write permit"},
+  {0x5B,0x00,D|T|L|P|W|R|S|O|M|K,"Log exception"},
+  {0x5B,0x01,D|T|L|P|W|R|S|O|M|K,"Threshold condition met"},
+  {0x5B,0x02,D|T|L|P|W|R|S|O|M|K,"Log counter at maximum"},
+  {0x5B,0x03,D|T|L|P|W|R|S|O|M|K,"Log list codes exhausted"},
+  {0x5C,0x00,D|O,"Rpl status change"},
+  {0x5C,0x01,D|O,"Spindles synchronized"},
+  {0x5C,0x02,D|O,"Spindles not synchronized"},
+  {0x5D,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Failure prediction threshold exceeded"},
+  {0x5D,0x01,R|B,"Media failure prediction threshold exceeded"},
+  {0x5D,0x02,R,"Logical unit failure prediction threshold exceeded"},
+  {0x5D,0x10,D|B,"Hardware impending failure general hard drive failure"},
+  {0x5D,0x11,D|B,"Hardware impending failure drive error rate too high"},
+  {0x5D,0x12,D|B,"Hardware impending failure data error rate too high"},
+  {0x5D,0x13,D|B,"Hardware impending failure seek error rate too high"},
+  {0x5D,0x14,D|B,"Hardware impending failure too many block reassigns"},
+  {0x5D,0x15,D|B,"Hardware impending failure access times too high"},
+  {0x5D,0x16,D|B,"Hardware impending failure start unit times too high"},
+  {0x5D,0x17,D|B,"Hardware impending failure channel parametrics"},
+  {0x5D,0x18,D|B,"Hardware impending failure controller detected"},
+  {0x5D,0x19,D|B,"Hardware impending failure throughput performance"},
+  {0x5D,0x1A,D|B,"Hardware impending failure seek time performance"},
+  {0x5D,0x1B,D|B,"Hardware impending failure spin-up retry count"},
+  {0x5D,0x1C,D|B,"Hardware impending failure drive calibration retry count"},
+  {0x5D,0x20,D|B,"Controller impending failure general hard drive failure"},
+  {0x5D,0x21,D|B,"Controller impending failure drive error rate too high"},
+  {0x5D,0x22,D|B,"Controller impending failure data error rate too high"},
+  {0x5D,0x23,D|B,"Controller impending failure seek error rate too high"},
+  {0x5D,0x24,D|B,"Controller impending failure too many block reassigns"},
+  {0x5D,0x25,D|B,"Controller impending failure access times too high"},
+  {0x5D,0x26,D|B,"Controller impending failure start unit times too high"},
+  {0x5D,0x27,D|B,"Controller impending failure channel parametrics"},
+  {0x5D,0x28,D|B,"Controller impending failure controller detected"},
+  {0x5D,0x29,D|B,"Controller impending failure throughput performance"},
+  {0x5D,0x2A,D|B,"Controller impending failure seek time performance"},
+  {0x5D,0x2B,D|B,"Controller impending failure spin-up retry count"},
+  {0x5D,0x2C,D|B,"Controller impending failure drive calibration retry count"},
+  {0x5D,0x30,D|B,"Data channel impending failure general hard drive failure"},
+  {0x5D,0x31,D|B,"Data channel impending failure drive error rate too high"},
+  {0x5D,0x32,D|B,"Data channel impending failure data error rate too high"},
+  {0x5D,0x33,D|B,"Data channel impending failure seek error rate too high"},
+  {0x5D,0x34,D|B,"Data channel impending failure too many block reassigns"},
+  {0x5D,0x35,D|B,"Data channel impending failure access times too high"},
+  {0x5D,0x36,D|B,"Data channel impending failure start unit times too high"},
+  {0x5D,0x37,D|B,"Data channel impending failure channel parametrics"},
+  {0x5D,0x38,D|B,"Data channel impending failure controller detected"},
+  {0x5D,0x39,D|B,"Data channel impending failure throughput performance"},
+  {0x5D,0x3A,D|B,"Data channel impending failure seek time performance"},
+  {0x5D,0x3B,D|B,"Data channel impending failure spin-up retry count"},
+  {0x5D,0x3C,D|B,"Data channel impending failure drive calibration retry count"},
+  {0x5D,0x40,D|B,"Servo impending failure general hard drive failure"},
+  {0x5D,0x41,D|B,"Servo impending failure drive error rate too high"},
+  {0x5D,0x42,D|B,"Servo impending failure data error rate too high"},
+  {0x5D,0x43,D|B,"Servo impending failure seek error rate too high"},
+  {0x5D,0x44,D|B,"Servo impending failure too many block reassigns"},
+  {0x5D,0x45,D|B,"Servo impending failure access times too high"},
+  {0x5D,0x46,D|B,"Servo impending failure start unit times too high"},
+  {0x5D,0x47,D|B,"Servo impending failure channel parametrics"},
+  {0x5D,0x48,D|B,"Servo impending failure controller detected"},
+  {0x5D,0x49,D|B,"Servo impending failure throughput performance"},
+  {0x5D,0x4A,D|B,"Servo impending failure seek time performance"},
+  {0x5D,0x4B,D|B,"Servo impending failure spin-up retry count"},
+  {0x5D,0x4C,D|B,"Servo impending failure drive calibration retry count"},
+  {0x5D,0x50,D|B,"Spindle impending failure general hard drive failure"},
+  {0x5D,0x51,D|B,"Spindle impending failure drive error rate too high"},
+  {0x5D,0x52,D|B,"Spindle impending failure data error rate too high"},
+  {0x5D,0x53,D|B,"Spindle impending failure seek error rate too high"},
+  {0x5D,0x54,D|B,"Spindle impending failure too many block reassigns"},
+  {0x5D,0x55,D|B,"Spindle impending failure access times too high"},
+  {0x5D,0x56,D|B,"Spindle impending failure start unit times too high"},
+  {0x5D,0x57,D|B,"Spindle impending failure channel parametrics"},
+  {0x5D,0x58,D|B,"Spindle impending failure controller detected"},
+  {0x5D,0x59,D|B,"Spindle impending failure throughput performance"},
+  {0x5D,0x5A,D|B,"Spindle impending failure seek time performance"},
+  {0x5D,0x5B,D|B,"Spindle impending failure spin-up retry count"},
+  {0x5D,0x5C,D|B,"Spindle impending failure drive calibration retry count"},
+  {0x5D,0x60,D|B,"Firmware impending failure general hard drive failure"},
+  {0x5D,0x61,D|B,"Firmware impending failure drive error rate too high"},
+  {0x5D,0x62,D|B,"Firmware impending failure data error rate too high"},
+  {0x5D,0x63,D|B,"Firmware impending failure seek error rate too high"},
+  {0x5D,0x64,D|B,"Firmware impending failure too many block reassigns"},
+  {0x5D,0x65,D|B,"Firmware impending failure access times too high"},
+  {0x5D,0x66,D|B,"Firmware impending failure start unit times too high"},
+  {0x5D,0x67,D|B,"Firmware impending failure channel parametrics"},
+  {0x5D,0x68,D|B,"Firmware impending failure controller detected"},
+  {0x5D,0x69,D|B,"Firmware impending failure throughput performance"},
+  {0x5D,0x6A,D|B,"Firmware impending failure seek time performance"},
+  {0x5D,0x6B,D|B,"Firmware impending failure spin-up retry count"},
+  {0x5D,0x6C,D|B,"Firmware impending failure drive calibration retry count"},
+  {0x5D,0xFF,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Failure prediction threshold exceeded (false)"},
+  {0x5E,0x00,D|T|L|P|W|R|S|O|C|A|K,"Low power condition on"},
+  {0x5E,0x01,D|T|L|P|W|R|S|O|C|A|K,"Idle condition activated by timer"},
+  {0x5E,0x02,D|T|L|P|W|R|S|O|C|A|K,"Standby condition activated by timer"},
+  {0x5E,0x03,D|T|L|P|W|R|S|O|C|A|K,"Idle condition activated by command"},
+  {0x5E,0x04,D|T|L|P|W|R|S|O|C|A|K,"Standby condition activated by command"},
+  {0x5E,0x41,B,"Power state change to active"},
+  {0x5E,0x42,B,"Power state change to idle"},
+  {0x5E,0x43,B,"Power state change to standby"},
+  {0x5E,0x45,B,"Power state change to sleep"},
+  {0x5E,0x47,B|K,"Power state change to device control"},
+  {0x60,0x00,S,"Lamp failure"},
+  {0x61,0x00,S,"Video acquisition error"},
+  {0x61,0x01,S,"Unable to acquire video"},
+  {0x61,0x02,S,"Out of focus"},
+  {0x62,0x00,S,"Scan head positioning error"},
+  {0x63,0x00,R,"End of user area encountered on this track"},
+  {0x63,0x01,R,"Packet does not fit in available space"},
+  {0x64,0x00,R,"Illegal mode for this track"},
+  {0x64,0x01,R,"Invalid packet size"},
+  {0x65,0x00,D|T|L|P|W|R|S|O|M|C|A|E|B|K,"Voltage fault"},
+  {0x66,0x00,S,"Automatic document feeder cover up"},
+  {0x66,0x01,S,"Automatic document feeder lift up"},
+  {0x66,0x02,S,"Document jam in automatic document feeder"},
+  {0x66,0x03,S,"Document miss feed automatic in document feeder"},
+  {0x67,0x00,A,"Configuration failure"},
+  {0x67,0x01,A,"Configuration of incapable logical units failed"},
+  {0x67,0x02,A,"Add logical unit failed"},
+  {0x67,0x03,A,"Modification of logical unit failed"},
+  {0x67,0x04,A,"Exchange of logical unit failed"},
+  {0x67,0x05,A,"Remove of logical unit failed"},
+  {0x67,0x06,A,"Attachment of logical unit failed"},
+  {0x67,0x07,A,"Creation of logical unit failed"},
+  {0x67,0x08,A,"Assign failure occurred"},
+  {0x67,0x09,A,"Multiply assigned logical unit"},
+  {0x68,0x00,A,"Logical unit not configured"},
+  {0x69,0x00,A,"Data loss on logical unit"},
+  {0x69,0x01,A,"Multiple logical unit failures"},
+  {0x69,0x02,A,"Parity/data mismatch"},
+  {0x6A,0x00,A,"Informational,refer to log"},
+  {0x6B,0x00,A,"State change has occurred"},
+  {0x6B,0x01,A,"Redundancy level got better"},
+  {0x6B,0x02,A,"Redundancy level got worse"},
+  {0x6C,0x00,A,"Rebuild failure occurred"},
+  {0x6D,0x00,A,"Recalculate failure occurred"},
+  {0x6E,0x00,A,"Command to logical unit failed"},
+  {0x6F,0x00,R,"Copy protection key exchange failure - authentication failure"},
+  {0x6F,0x01,R,"Copy protection key exchange failure - key not present"},
+  {0x6F,0x02,R,"Copy protection key exchange failure - key not established"},
+  {0x6F,0x03,R,"Read of scrambled sector without authentication"},
+  {0x6F,0x04,R,"Media region code is mismatched to logical unit region"},
+  {0x6F,0x05,R,"Drive region must be permanent/region reset count error"},
+  /*
+   * FIXME(eric) - need a way to represent wildcards here.
+   */
+  {0x70,0x00,T,"Decompression exception short algorithm id of nn"},
+  {0x71,0x00,T,"Decompression exception long algorithm id"},
+  {0x72,0x00,R,"Session fixation error"},
+  {0x72,0x01,R,"Session fixation error writing lead-in"},
+  {0x72,0x02,R,"Session fixation error writing lead-out"},
+  {0x72,0x03,R,"Session fixation error - incomplete track in session"},
+  {0x72,0x04,R,"Empty or partially written reserved track"},
+  {0x72,0x05,R,"No more track reservations allowed"},
+  {0x73,0x00,R,"Cd control error"},
+  {0x73,0x01,R,"Power calibration area almost full"},
+  {0x73,0x02,R,"Power calibration area is full"},
+  {0x73,0x03,R,"Power calibration area error"},
+  {0x73,0x04,R,"Program memory area update failure"},
+  {0x73,0x05,R,"Program memory area is full"},
+  {0x73,0x06,R,"RMA/PMA is full"},
+  {0, 0, 0, NULL}
+};
+#endif
+
+#if (CONSTANTS & CONST_SENSE)
+static const char *snstext[] = {
+    "None",                     /* There is no sense information */
+    "Recovered Error",          /* The last command completed successfully
+                                   but used error correction */
+    "Not Ready",                /* The addressed target is not ready */
+    "Medium Error",             /* Data error detected on the medium */
+    "Hardware Error",           /* Controller or device failure */
+    "Illegal Request",
+    "Unit Attention",           /* Removable medium was changed, or
+                                   the target has been reset */
+    "Data Protect",             /* Access to the data is blocked */
+    "Blank Check",              /* Reached unexpected written or unwritten
+                                   region of the medium */
+    "Key=9",                    /* Vendor specific */
+    "Copy Aborted",             /* COPY or COMPARE was aborted */
+    "Aborted Command",          /* The target aborted the command */
+    "Equal",                    /* A SEARCH DATA command found data equal */
+    "Volume Overflow",          /* Medium full with still data to be written */
+    "Miscompare",               /* Source data and data on the medium
+                                   do not agree */
+    "Key=15"                    /* Reserved */
+};
+#endif
+
+/* Print sense information */
+static 
+void print_sense_internal(const char * devclass, 
+                         const unsigned char * sense_buffer,
+                         kdev_t dev)
+{
+    int i, s;
+    int sense_class, valid, code, info;
+    const char * error = NULL;
+    
+    sense_class = (sense_buffer[0] >> 4) & 0x07;
+    code = sense_buffer[0] & 0xf;
+    valid = sense_buffer[0] & 0x80;
+    
+    if (sense_class == 7) {    /* extended sense data */
+       s = sense_buffer[7] + 8;
+       if(s > SCSI_SENSE_BUFFERSIZE)
+          s = SCSI_SENSE_BUFFERSIZE;
+       
+       info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) |
+               (sense_buffer[5] << 8) | sense_buffer[6]);
+       if (info || valid) {
+               printk("Info fld=0x%x", info);
+               if (!valid)     /* info data not according to standard */
+                       printk(" (nonstd)");
+               printk(", ");
+       }
+       if (sense_buffer[2] & 0x80)
+           printk( "FMK ");    /* current command has read a filemark */
+       if (sense_buffer[2] & 0x40)
+           printk( "EOM ");    /* end-of-medium condition exists */
+       if (sense_buffer[2] & 0x20)
+           printk( "ILI ");    /* incorrect block length requested */
+       
+       switch (code) {
+       case 0x0:
+           error = "Current";  /* error concerns current command */
+           break;
+       case 0x1:
+           error = "Deferred"; /* error concerns some earlier command */
+               /* e.g., an earlier write to disk cache succeeded, but
+                   now the disk discovers that it cannot write the data */
+           break;
+       default:
+           error = "Invalid";
+       }
+       
+       printk("%s ", error);
+       
+#if (CONSTANTS & CONST_SENSE)
+       printk( "%s%s: sense key %s\n", devclass,
+              kdevname(dev), snstext[sense_buffer[2] & 0x0f]);
+#else
+       printk("%s%s: sns = %2x %2x\n", devclass,
+              kdevname(dev), sense_buffer[0], sense_buffer[2]);
+#endif
+       
+       /* Check to see if additional sense information is available */
+       if(sense_buffer[7] + 7 < 13 ||
+          (sense_buffer[12] == 0  && sense_buffer[13] ==  0)) goto done;
+       
+#if (CONSTANTS & CONST_XSENSE)
+       for(i=0; additional[i].text; i++)
+           if(additional[i].code1 == sense_buffer[12] &&
+              additional[i].code2 == sense_buffer[13])
+               printk("Additional sense indicates %s\n", additional[i].text);
+       
+       for(i=0; additional2[i].text; i++)
+           if(additional2[i].code1 == sense_buffer[12] &&
+              additional2[i].code2_min <= sense_buffer[13]  && /* ASCQ in */
+              additional2[i].code2_max >= sense_buffer[13]) { /* [min,max] */
+               printk("Additional sense indicates ");
+               printk(additional2[i].text, sense_buffer[13]);
+               printk("\n");
+           };
+#else
+       printk("ASC=%2x ASCQ=%2x\n", sense_buffer[12], sense_buffer[13]);
+#endif
+    } else {   /* non-extended sense data */
+
+         /*
+          * Standard says:
+          *    sense_buffer[0] & 0200 : address valid
+          *    sense_buffer[0] & 0177 : vendor-specific error code
+          *    sense_buffer[1] & 0340 : vendor-specific
+          *    sense_buffer[1..3] : 21-bit logical block address
+          */
+       
+#if (CONSTANTS & CONST_SENSE)
+       if (sense_buffer[0] < 15)
+           printk("%s%s: old sense key %s\n", devclass,
+             kdevname(dev), snstext[sense_buffer[0] & 0x0f]);
+       else
+#endif
+           printk("%s%s: sns = %2x %2x\n", devclass,
+             kdevname(dev), sense_buffer[0], sense_buffer[2]);
+       
+       printk("Non-extended sense class %d code 0x%0x\n", sense_class, code);
+       s = 4;
+    }
+    
+ done:
+#if !(CONSTANTS & CONST_SENSE)
+    printk("Raw sense data:");
+    for (i = 0; i < s; ++i) 
+       printk("0x%02x ", sense_buffer[i]);
+    printk("\n");
+#endif
+    return;
+}
+
+void print_sense(const char * devclass, Scsi_Cmnd * SCpnt)
+{
+       print_sense_internal(devclass, SCpnt->sense_buffer,
+                            SCpnt->request.rq_dev);
+}
+
+void print_req_sense(const char * devclass, Scsi_Request * SRpnt)
+{
+       print_sense_internal(devclass, SRpnt->sr_sense_buffer,
+                            SRpnt->sr_request.rq_dev);
+}
+
+#if (CONSTANTS & CONST_MSG) 
+static const char *one_byte_msgs[] = {
+/* 0x00 */ "Command Complete", NULL, "Save Pointers",
+/* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error", 
+/* 0x06 */ "Abort", "Message Reject", "Nop", "Message Parity Error",
+/* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag",
+/* 0x0c */ "Bus device reset", "Abort Tag", "Clear Queue", 
+/* 0x0f */ "Initiate Recovery", "Release Recovery"
+};
+
+#define NO_ONE_BYTE_MSGS (sizeof(one_byte_msgs)  / sizeof (const char *))
+
+static const char *two_byte_msgs[] = {
+/* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag",
+/* 0x23 */ "Ignore Wide Residue"
+};
+
+#define NO_TWO_BYTE_MSGS (sizeof(two_byte_msgs)  / sizeof (const char *))
+
+static const char *extended_msgs[] = {
+/* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request",
+/* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request"
+};
+
+#define NO_EXTENDED_MSGS (sizeof(extended_msgs)  / sizeof (const char *))
+#endif /* (CONSTANTS & CONST_MSG) */
+
+int print_msg (const unsigned char *msg) {
+    int len = 0, i;
+    if (msg[0] == EXTENDED_MESSAGE) {
+       len = 3 + msg[1];
+#if (CONSTANTS & CONST_MSG)
+       if (msg[2] < NO_EXTENDED_MSGS)
+           printk ("%s ", extended_msgs[msg[2]]); 
+       else 
+           printk ("Extended Message, reserved code (0x%02x) ", (int) msg[2]);
+       switch (msg[2]) {
+       case EXTENDED_MODIFY_DATA_POINTER:
+           printk("pointer = %d", (int) (msg[3] << 24) | (msg[4] << 16) | 
+                  (msg[5] << 8) | msg[6]);
+           break;
+       case EXTENDED_SDTR:
+           printk("period = %d ns, offset = %d", (int) msg[3] * 4, (int) 
+                  msg[4]);
+           break;
+       case EXTENDED_WDTR:
+           printk("width = 2^%d bytes", msg[3]);
+           break;
+       default:
+           for (i = 2; i < len; ++i) 
+               printk("%02x ", msg[i]);
+       }
+#else
+       for (i = 0; i < len; ++i)
+           printk("%02x ", msg[i]);
+#endif
+       /* Identify */
+    } else if (msg[0] & 0x80) {
+#if (CONSTANTS & CONST_MSG)
+       printk("Identify disconnect %sallowed %s %d ",
+              (msg[0] & 0x40) ? "" : "not ",
+              (msg[0] & 0x20) ? "target routine" : "lun",
+              msg[0] & 0x7);
+#else
+       printk("%02x ", msg[0]);
+#endif
+       len = 1;
+       /* Normal One byte */
+    } else if (msg[0] < 0x1f) {
+#if (CONSTANTS & CONST_MSG)
+       if (msg[0] < NO_ONE_BYTE_MSGS)
+           printk(one_byte_msgs[msg[0]]);
+       else
+           printk("reserved (%02x) ", msg[0]);
+#else
+       printk("%02x ", msg[0]);
+#endif
+       len = 1;
+       /* Two byte */
+    } else if (msg[0] <= 0x2f) {
+#if (CONSTANTS & CONST_MSG)
+       if ((msg[0] - 0x20) < NO_TWO_BYTE_MSGS)
+           printk("%s %02x ", two_byte_msgs[msg[0] - 0x20], 
+                  msg[1]);
+       else 
+           printk("reserved two byte (%02x %02x) ", 
+                  msg[0], msg[1]);
+#else
+       printk("%02x %02x", msg[0], msg[1]);
+#endif
+       len = 2;
+    } else 
+#if (CONSTANTS & CONST_MSG)
+       printk(reserved);
+#else
+    printk("%02x ", msg[0]);
+#endif
+    return len;
+}
+
+void print_Scsi_Cmnd (Scsi_Cmnd *cmd) {
+    printk("scsi%d : destination target %d, lun %d\n", 
+          cmd->host->host_no, 
+          cmd->target, 
+          cmd->lun);
+    printk("        command = ");
+    print_command (cmd->cmnd);
+}
+
+#if (CONSTANTS & CONST_HOST)
+static const char * hostbyte_table[]={
+"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", 
+"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
+"DID_PASSTHROUGH", "DID_SOFT_ERROR", NULL};
+
+void print_hostbyte(int scsiresult)
+{   static int maxcode=0;
+    int i;
+   
+    if(!maxcode) {
+       for(i=0;hostbyte_table[i];i++) ;
+       maxcode=i-1;
+    }
+    printk("Hostbyte=0x%02x",host_byte(scsiresult));
+    if(host_byte(scsiresult)>maxcode) {
+       printk("is invalid "); 
+       return;
+    }
+    printk("(%s) ",hostbyte_table[host_byte(scsiresult)]);
+}
+#else
+void print_hostbyte(int scsiresult)
+{   printk("Hostbyte=0x%02x ",host_byte(scsiresult));
+}
+#endif
+
+#if (CONSTANTS & CONST_DRIVER)
+static const char * driverbyte_table[]={
+"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT",  "DRIVER_MEDIA", "DRIVER_ERROR", 
+"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD",NULL };
+
+static const char * driversuggest_table[]={"SUGGEST_OK",
+"SUGGEST_RETRY", "SUGGEST_ABORT", "SUGGEST_REMAP", "SUGGEST_DIE",
+unknown,unknown,unknown, "SUGGEST_SENSE",NULL};
+
+
+void print_driverbyte(int scsiresult)
+{   static int driver_max=0,suggest_max=0;
+    int i,dr=driver_byte(scsiresult)&DRIVER_MASK, 
+       su=(driver_byte(scsiresult)&SUGGEST_MASK)>>4;
+
+    if(!driver_max) {
+        for(i=0;driverbyte_table[i];i++) ;
+        driver_max=i;
+       for(i=0;driversuggest_table[i];i++) ;
+       suggest_max=i;
+    }
+    printk("Driverbyte=0x%02x",driver_byte(scsiresult));
+    printk("(%s,%s) ",
+       dr<driver_max  ? driverbyte_table[dr]:"invalid",
+       su<suggest_max ? driversuggest_table[su]:"invalid");
+}
+#else
+void print_driverbyte(int scsiresult)
+{   printk("Driverbyte=0x%02x ",driver_byte(scsiresult));
+}
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/xen-2.4.16/drivers/scsi/hosts.c b/xen-2.4.16/drivers/scsi/hosts.c
new file mode 100644 (file)
index 0000000..ea613aa
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ *  hosts.c Copyright (C) 1992 Drew Eckhardt
+ *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ *  mid to lowlevel SCSI driver interface
+ *      Initial versions: Drew Eckhardt
+ *      Subsequent revisions: Eric Youngdale
+ *
+ *  <drew@colorado.edu>
+ *
+ *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
+ *  Added QLOGIC QLA1280 SCSI controller kernel host support. 
+ *     August 4, 1999 Fred Lewis, Intel DuPont
+ *
+ *  Updated to reflect the new initialization scheme for the higher 
+ *  level of scsi drivers (sd/sr/st)
+ *  September 17, 2000 Torben Mathiasen <tmm@image.dk>
+ */
+
+
+/*
+ *  This file contains the medium level SCSI
+ *  host interface initialization, as well as the scsi_hosts array of SCSI
+ *  hosts currently present in the system.
+ */
+
+#define __NO_VERSION__
+#include <xeno/module.h>
+#include <xeno/blk.h>
+/*  #include <xeno/kernel.h> */
+/*  #include <xeno/string.h> */
+/*  #include <xeno/mm.h> */
+/*  #include <xeno/proc_fs.h> */
+#include <xeno/init.h>
+
+#define __KERNEL_SYSCALLS__
+
+/* #include <xeno/unistd.h> */
+
+#include "scsi.h"
+#include "hosts.h"
+
+/*
+static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/hosts.c,v 1.20 1996/12/12 19:18:32 davem Exp $";
+*/
+
+/*
+ *  The scsi host entries should be in the order you wish the
+ *  cards to be detected.  A driver may appear more than once IFF
+ *  it can deal with being detected (and therefore initialized)
+ *  with more than one simultaneous host number, can handle being
+ *  reentrant, etc.
+ *
+ *  They may appear in any order, as each SCSI host is told which host 
+ *  number it is during detection.
+ */
+
+/* This is a placeholder for controllers that are not configured into
+ * the system - we do this to ensure that the controller numbering is
+ * always consistent, no matter how the kernel is configured. */
+
+#define NO_CONTROLLER {NULL, NULL, NULL, NULL, NULL, NULL, NULL, \
+                          NULL, NULL, 0, 0, 0, 0, 0, 0}
+
+/*
+ *  When figure is run, we don't want to link to any object code.  Since
+ *  the macro for each host will contain function pointers, we cannot
+ *  use it and instead must use a "blank" that does no such
+ *  idiocy.
+ */
+
+Scsi_Host_Template * scsi_hosts;
+
+
+/*
+ *  Our semaphores and timeout counters, where size depends on 
+ *      MAX_SCSI_HOSTS here.
+ */
+
+Scsi_Host_Name * scsi_host_no_list;
+struct Scsi_Host * scsi_hostlist;
+struct Scsi_Device_Template * scsi_devicelist;
+
+int max_scsi_hosts;
+int next_scsi_host;
+
+void
+scsi_unregister(struct Scsi_Host * sh){
+    struct Scsi_Host * shpnt;
+    Scsi_Host_Name *shn;
+        
+    if(scsi_hostlist == sh)
+       scsi_hostlist = sh->next;
+    else {
+       shpnt = scsi_hostlist;
+       while(shpnt->next != sh) shpnt = shpnt->next;
+       shpnt->next = shpnt->next->next;
+    }
+
+    /*
+     * We have to unregister the host from the scsi_host_no_list as well.
+     * Decide by the host_no not by the name because most host drivers are
+     * able to handle more than one adapters from the same kind (or family).
+     */
+    for ( shn=scsi_host_no_list; shn && (sh->host_no != shn->host_no);
+         shn=shn->next);
+    if (shn) shn->host_registered = 0;
+    /* else {} : This should not happen, we should panic here... */
+    
+    /* If we are removing the last host registered, it is safe to reuse
+     * its host number (this avoids "holes" at boot time) (DB) 
+     * It is also safe to reuse those of numbers directly below which have
+     * been released earlier (to avoid some holes in numbering).
+     */
+    if(sh->host_no == max_scsi_hosts - 1) {
+       while(--max_scsi_hosts >= next_scsi_host) {
+           shpnt = scsi_hostlist;
+           while(shpnt && shpnt->host_no != max_scsi_hosts - 1)
+               shpnt = shpnt->next;
+           if(shpnt)
+               break;
+       }
+    }
+    next_scsi_host--;
+    kfree((char *) sh);
+}
+
+/* We call this when we come across a new host adapter. We only do this
+ * once we are 100% sure that we want to use this host adapter -  it is a
+ * pain to reverse this, so we try to avoid it 
+ */
+
+struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
+    struct Scsi_Host * retval, *shpnt, *o_shp;
+    Scsi_Host_Name *shn, *shn2;
+    int flag_new = 1;
+    const char * hname;
+    size_t hname_len;
+    retval = (struct Scsi_Host *)kmalloc(sizeof(struct Scsi_Host) + j,
+                                        (tpnt->unchecked_isa_dma && j ? 
+                                         GFP_DMA : 0) | GFP_ATOMIC);
+    if(retval == NULL)
+    {
+        printk("scsi: out of memory in scsi_register.\n");
+       return NULL;
+    }
+       
+    memset(retval, 0, sizeof(struct Scsi_Host) + j);
+
+    /* trying to find a reserved entry (host_no) */
+    hname = (tpnt->proc_name) ?  tpnt->proc_name : "";
+    hname_len = strlen(hname);
+    for (shn = scsi_host_no_list;shn;shn = shn->next) {
+       if (!(shn->host_registered) && 
+           (hname_len > 0) && (0 == strncmp(hname, shn->name, hname_len))) {
+           flag_new = 0;
+           retval->host_no = shn->host_no;
+           shn->host_registered = 1;
+           shn->loaded_as_module = 1;
+           break;
+       }
+    }
+    atomic_set(&retval->host_active,0);
+    retval->host_busy = 0;
+    retval->host_failed = 0;
+    if(j > 0xffff) panic("Too many extra bytes requested\n");
+    retval->extra_bytes = j;
+    retval->loaded_as_module = 1;
+    if (flag_new) {
+       shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC);
+        if (!shn) {
+                kfree(retval);
+                printk(KERN_ERR "scsi: out of memory(2) in scsi_register.\n");
+                return NULL;
+        }
+       shn->name = kmalloc(hname_len + 1, GFP_ATOMIC);
+       if (!shn->name) { kfree(shn); kfree(retval); return NULL; } /* OOM */
+       memcpy(shn->name, hname, hname_len); /* NUL terminator added below */
+       shn->name[hname_len] = 0;
+       shn->host_no = max_scsi_hosts++;
+       shn->host_registered = 1;
+       shn->loaded_as_module = 1;
+       shn->next = NULL;
+       if (scsi_host_no_list) {
+           for (shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
+               ;
+           shn2->next = shn;
+       }
+       else
+           scsi_host_no_list = shn;
+       retval->host_no = shn->host_no;
+    }
+    next_scsi_host++;
+    retval->host_queue = NULL;
+#if 0
+    init_waitqueue_head(&retval->host_wait);
+#endif
+    retval->resetting = 0;
+    retval->last_reset = 0;
+    retval->irq = 0;
+    retval->dma_channel = 0xff;
+
+    /* These three are default values which can be overridden */
+    retval->max_channel = 0; 
+    retval->max_id = 8;      
+    retval->max_lun = 8;
+
+    /*
+     * All drivers right now should be able to handle 12 byte commands.
+     * Every so often there are requests for 16 byte commands, but individual
+     * low-level drivers need to certify that they actually do something
+     * sensible with such commands.
+     */
+    retval->max_cmd_len = 12;
+
+    retval->unique_id = 0;
+    retval->io_port = 0;
+    retval->hostt = tpnt;
+    retval->next = NULL;
+    retval->in_recovery = 0;
+    retval->ehandler = NULL;    /* Initial value until the thing starts up. */
+    retval->eh_notify   = NULL;    /* Who we notify when we exit. */
+
+
+    retval->host_blocked = FALSE;
+    retval->host_self_blocked = FALSE;
+
+#ifdef DEBUG
+    printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j);
+#endif
+
+    /* The next six are the default values which can be overridden
+     * if need be */
+    retval->this_id = tpnt->this_id;
+    retval->can_queue = tpnt->can_queue;
+    retval->sg_tablesize = tpnt->sg_tablesize;
+    retval->cmd_per_lun = tpnt->cmd_per_lun;
+    retval->unchecked_isa_dma = tpnt->unchecked_isa_dma;
+    retval->use_clustering = tpnt->use_clustering;   
+
+    retval->select_queue_depths = tpnt->select_queue_depths;
+    retval->max_sectors = tpnt->max_sectors;
+
+    if(!scsi_hostlist)
+       scsi_hostlist = retval;
+    else {
+       shpnt = scsi_hostlist;
+       if (retval->host_no < shpnt->host_no) {
+           retval->next = shpnt;
+           wmb(); /* want all to see these writes in this order */
+           scsi_hostlist = retval;
+       }
+       else {
+           for (o_shp = shpnt, shpnt = shpnt->next; shpnt; 
+                o_shp = shpnt, shpnt = shpnt->next) {
+               if (retval->host_no < shpnt->host_no) {
+                   retval->next = shpnt;
+                   wmb();
+                   o_shp->next = retval;
+                   break;
+               }
+           }
+           if (! shpnt)
+               o_shp->next = retval;
+        }
+    }
+    
+    return retval;
+}
+
+int
+scsi_register_device(struct Scsi_Device_Template * sdpnt)
+{
+    if(sdpnt->next) panic("Device already registered");
+    sdpnt->next = scsi_devicelist;
+    scsi_devicelist = sdpnt;
+    return 0;
+}
+
+void
+scsi_deregister_device(struct Scsi_Device_Template * tpnt)
+{
+    struct Scsi_Device_Template *spnt;
+    struct Scsi_Device_Template *prev_spnt;
+
+    spnt = scsi_devicelist;
+    prev_spnt = NULL;
+    while (spnt != tpnt) {
+    prev_spnt = spnt;
+    spnt = spnt->next;
+    }
+    if (prev_spnt == NULL)
+        scsi_devicelist = tpnt->next;
+    else
+        prev_spnt->next = spnt->next;
+}
+
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
index d222fc61dce7ebde757cec1163ea8d7b5e77185c..34d3592e0e7487c8b0ec2565a6b36b755338fcfb 100644 (file)
@@ -64,6 +64,7 @@ typedef struct        SHT
     /* Used with loadable modules so that we know when it is safe to unload */
     struct module * module;
 
+#ifdef CONFIG_PROC_FS
     /* The pointer to the /proc/scsi directory entry */
     struct proc_dir_entry *proc_dir;
 
@@ -73,6 +74,7 @@ typedef struct        SHT
      * to feed the driver with information. Check eata_dma_proc.c for reference
      */
     int (*proc_info)(char *, char **, off_t, int, int, int);
+#endif
 
     /*
      * The name pointer is a pointer to the name of the SCSI
@@ -326,7 +328,9 @@ struct Scsi_Host
                                           host. */
     unsigned int            eh_active:1; /* Indicates the eh thread is awake and active if
                                           this is true. */
+#if 0 
     wait_queue_head_t       host_wait;
+#endif
     Scsi_Host_Template    * hostt;
     atomic_t                host_active; /* commands checked out */
     volatile unsigned short host_busy;   /* commands actually active on low-level */
@@ -528,9 +532,11 @@ void  scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt);
 int scsi_register_device(struct Scsi_Device_Template * sdpnt);
 void scsi_deregister_device(struct Scsi_Device_Template * tpnt);
 
+#if 0 
 /* These are used by loadable modules */
 extern int scsi_register_module(int, void *);
 extern int scsi_unregister_module(int, void *);
+#endif
 
 /* The different types of modules that we can load and unload */
 #define MODULE_SCSI_HA 1
index 320b7cd1a995f4c42d62f4e3101f579a96df2f96..85a59f54ac9a64971412d05ef7c15facb0187157 100644 (file)
@@ -149,6 +149,7 @@ const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
 void scsi_build_commandblocks(Scsi_Device * SDpnt);
 
+#if 0
 /*
  * These are the interface to the old error handling code.  It should go away
  * someday soon.
@@ -156,6 +157,7 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt);
 extern void scsi_old_done(Scsi_Cmnd * SCpnt);
 extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
 extern int scsi_old_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
+#endif
 
 /* 
  * Private interface into the new error handling code.
@@ -201,21 +203,24 @@ MODULE_PARM(scsi_logging_level, "i");
 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
 
 #else
-
 static int __init scsi_logging_setup(char *str)
 {
-       int tmp;
+#if 0
+    int tmp;
+    
+    if (get_option(&str, &tmp) == 1) {
+        scsi_logging_level = (tmp ? ~0 : 0);
+        return 1;
+    } else {
+        printk(KERN_INFO "scsi_logging_setup : usage scsi_logging_level=n "
+               "(n should be 0 or non-zero)\n");
+        return 0;
+    }
+#else
+    return 0; 
+#endif
 
-       if (get_option(&str, &tmp) == 1) {
-               scsi_logging_level = (tmp ? ~0 : 0);
-               return 1;
-       } else {
-               printk(KERN_INFO "scsi_logging_setup : usage scsi_logging_level=n "
-                      "(n should be 0 or non-zero)\n");
-               return 0;
-       }
 }
-
 __setup("scsi_logging=", scsi_logging_setup);
 
 #endif
@@ -226,14 +231,16 @@ __setup("scsi_logging=", scsi_logging_setup);
  
 static void scsi_wait_done(Scsi_Cmnd * SCpnt)
 {
-       struct request *req;
-
-       req = &SCpnt->request;
-       req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */
-
-       if (req->waiting != NULL) {
-               complete(req->waiting);
-       }
+    struct request *req;
+    
+    req = &SCpnt->request;
+    req->rq_status = RQ_SCSI_DONE;     /* Busy, but indicate request done */
+    
+#if 0
+    if (req->waiting != NULL) {
+        complete(req->waiting);
+    }
+#endif
 }
 
 /*
@@ -269,24 +276,24 @@ static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
 
 Scsi_Request *scsi_allocate_request(Scsi_Device * device)
 {
-       Scsi_Request *SRpnt = NULL;
-  
-       if (!device)
-               panic("No device passed to scsi_allocate_request().\n");
-  
-       SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
-       if( SRpnt == NULL )
-       {
-               return NULL;
-       }
-
-       memset(SRpnt, 0, sizeof(Scsi_Request));
-       SRpnt->sr_device = device;
-       SRpnt->sr_host = device->host;
-       SRpnt->sr_magic = SCSI_REQ_MAGIC;
-       SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
-
-       return SRpnt;
+    Scsi_Request *SRpnt = NULL;
+    
+    if (!device)
+        panic("No device passed to scsi_allocate_request().\n");
+    
+    SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
+    if( SRpnt == NULL )
+    {
+        return NULL;
+    }
+    
+    memset(SRpnt, 0, sizeof(Scsi_Request));
+    SRpnt->sr_device = device;
+    SRpnt->sr_host = device->host;
+    SRpnt->sr_magic = SCSI_REQ_MAGIC;
+    SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
+    
+    return SRpnt;
 }
 
 /*
@@ -308,13 +315,20 @@ Scsi_Request *scsi_allocate_request(Scsi_Device * device)
  */
 void scsi_release_request(Scsi_Request * req)
 {
-       if( req->sr_command != NULL )
-       {
-               scsi_release_command(req->sr_command);
-               req->sr_command = NULL;
-       }
-
-       kfree(req);
+    if( req->sr_command != NULL )
+    {
+#ifdef SMH_DEBUG
+        printk("scsi_release_request: req->sr_command = %p\n", 
+                   req->sr_command); 
+#endif
+        scsi_release_command(req->sr_command);
+        req->sr_command = NULL;
+#ifdef SMHHACK 
+        req->freeaddr = 0x1234; 
+#endif
+    }
+    
+    kfree(req);
 }
 
 /*
@@ -425,6 +439,9 @@ Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
                 * wait here.
                 */
                if (wait) {
+                    printk("XXX smh: scsi cannot wait for free cmd block.\n"); 
+                    BUG(); 
+#if 0 
                         DECLARE_WAITQUEUE(wait, current);
 
                         /*
@@ -471,6 +488,7 @@ Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
                                         return NULL;
                                 }
                         }
+#endif
                } else {
                         spin_unlock_irqrestore(&device_request_lock, flags);
                        return NULL;
@@ -546,17 +564,22 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
            && SCpnt->host->host_busy == SCpnt->host->host_failed) {
                SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
                             atomic_read(&SCpnt->host->eh_wait->count)));
+#if 0
                up(SCpnt->host->eh_wait);
+#endif
        }
 
        spin_unlock_irqrestore(&device_request_lock, flags);
 
+#if 0
         /*
          * Wake up anyone waiting for this device.  Do this after we
          * have released the lock, as they will need it as soon as
          * they wake up.  
          */
        wake_up(&SDpnt->scpnt_wait);
+#endif
+
 }
 
 /*
@@ -667,8 +690,10 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
        if (host->hostt->use_new_eh_code) {
                scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
        } else {
+#if 0
                scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
                               scsi_old_times_out);
+#endif
        }
 
        /*
@@ -717,7 +742,8 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
                         * Before we queue this command, check if the command
                         * length exceeds what the host adapter can handle.
                         */
-                       if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
+#if 0
+                    if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
                                spin_lock_irqsave(&io_request_lock, flags);
                                host->hostt->queuecommand(SCpnt, scsi_old_done);
                                spin_unlock_irqrestore(&io_request_lock, flags);
@@ -729,6 +755,8 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
                                spin_unlock_irqrestore(&io_request_lock, flags);
                                rtn = 1;
                        }
+#endif
+
                }
        } else {
                int temp;
@@ -751,7 +779,9 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
                if (host->hostt->use_new_eh_code) {
                        scsi_done(SCpnt);
                } else {
+#if 0
                        scsi_old_done(SCpnt);
+#endif
                }
                 spin_unlock_irqrestore(&io_request_lock, flags);
        }
@@ -759,7 +789,9 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
        return rtn;
 }
 
+#ifdef DEVFS_MUST_DIE
 devfs_handle_t scsi_devfs_handle;
+#endif
 
 /*
  * scsi_do_cmd sends all the commands out to the low-level driver.  It
@@ -772,22 +804,50 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
                  void *buffer, unsigned bufflen, 
                  int timeout, int retries)
 {
-       DECLARE_COMPLETION(wait);
-       request_queue_t *q = &SRpnt->sr_device->request_queue;
-       
-       SRpnt->sr_request.waiting = &wait;
-       SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
-       scsi_do_req (SRpnt, (void *) cmnd,
-               buffer, bufflen, scsi_wait_done, timeout, retries);
-       generic_unplug_device(q);
-       wait_for_completion(&wait);
-       SRpnt->sr_request.waiting = NULL;
-       if( SRpnt->sr_command != NULL )
-       {
-               scsi_release_command(SRpnt->sr_command);
-               SRpnt->sr_command = NULL;
-       }
+#if 0
+    DECLARE_COMPLETION(wait);
+#endif
+
+
+    request_queue_t *q = &SRpnt->sr_device->request_queue;
+    
+#if 0
+    SRpnt->sr_request.waiting = &wait;
+#endif
+
 
+    SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
+    scsi_do_req (SRpnt, (void *) cmnd,
+                 buffer, bufflen, scsi_wait_done, timeout, retries);
+    generic_unplug_device(q);
+
+
+#if 0
+    wait_for_completion(&wait);
+#endif
+
+    /* XXX SMH: in 'standard' driver we think everythings ok here since
+       we've waited on &wait -- hence we deallocate the command structure
+       if it hasn't been done already. This is not the correct behaviour 
+       in xen ... hmm .. how to fix? */
+    mdelay(500); 
+
+
+    SRpnt->sr_request.waiting = NULL;
+
+    if( SRpnt->sr_command != NULL )
+    {
+#ifdef SMH_DEBUG
+        printk("scsi_wait_req: releasing SRpnt->sr_command = %p\n", 
+               SRpnt->sr_command); 
+#endif
+        scsi_release_command(SRpnt->sr_command);
+        SRpnt->sr_command = NULL;
+#ifdef SMHHACK 
+        SRpnt->freeaddr = 0x99991234; 
+#endif
+    }
+    
 }
  
 /*
@@ -850,29 +910,40 @@ void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
         * be allocated later when this request is getting queued.
         */
        if( SRpnt->sr_command != NULL )
-       {
+       { 
+#ifdef SMH_DEBUG
+           printk("scsi_do_req: releasing SRpnt->sr_command = %p\n", 
+                   SRpnt->sr_command); 
+#endif
                scsi_release_command(SRpnt->sr_command);
                SRpnt->sr_command = NULL;
+#ifdef SMHHACK
+                SRpnt->freeaddr = 0xabbadead;
+#endif
        }
 
        /*
-        * We must prevent reentrancy to the lowlevel host driver.  This prevents
-        * it - we enter a loop until the host we want to talk to is not busy.
-        * Race conditions are prevented, as interrupts are disabled in between the
-        * time we check for the host being not busy, and the time we mark it busy
-        * ourselves.
-        */
+        * We must prevent reentrancy to the lowlevel host driver.
+        * This prevents it - we enter a loop until the host we want
+        * to talk to is not busy.  Race conditions are prevented, as
+        * interrupts are disabled in between the time we check for
+        * the host being not busy, and the time we mark it busy
+        * ourselves.  */
 
 
        /*
-        * Our own function scsi_done (which marks the host as not busy, disables
-        * the timeout counter, etc) will be called by us or by the
-        * scsi_hosts[host].queuecommand() function needs to also call
-        * the completion function for the high level driver.
-        */
+        * Our own function scsi_done (which marks the host as not
+        * busy, disables the timeout counter, etc) will be called by
+        * us or by the scsi_hosts[host].queuecommand() function needs
+        * to also call the completion function for the high level
+        * driver.  */
 
        memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd, 
               sizeof(SRpnt->sr_cmnd));
+#ifdef SMHHACK
+        SRpnt->freeaddr = 0x1111; 
+#endif
+
        SRpnt->sr_bufflen = bufflen;
        SRpnt->sr_buffer = buffer;
        SRpnt->sr_allowed = retries;
@@ -924,6 +995,10 @@ void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
 
        SCpnt->owner = SCSI_OWNER_MIDLEVEL;
        SRpnt->sr_command = SCpnt;
+#ifdef SMH_DEBUG
+        printk("scsi_init_cmd_from_req: SRpnt = %p, SRpnt->sr_command = %p\n", 
+               SRpnt, SRpnt->sr_command); 
+#endif        
 
        if (!host) {
                panic("Invalid or not present host.\n");
@@ -1218,103 +1293,113 @@ void scsi_done(Scsi_Cmnd * SCpnt)
  */
 void scsi_bottom_half_handler(void)
 {
-       Scsi_Cmnd *SCpnt;
-       Scsi_Cmnd *SCnext;
-       unsigned long flags;
-
-
-       while (1 == 1) {
-               spin_lock_irqsave(&scsi_bhqueue_lock, flags);
-               SCpnt = scsi_bh_queue_head;
-               scsi_bh_queue_head = NULL;
-               spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
-
-               if (SCpnt == NULL) {
-                       return;
-               }
-               SCnext = SCpnt->bh_next;
-
-               for (; SCpnt; SCpnt = SCnext) {
-                       SCnext = SCpnt->bh_next;
-
-                       switch (scsi_decide_disposition(SCpnt)) {
-                       case SUCCESS:
-                               /*
-                                * Add to BH queue.
-                                */
-                               SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
-                                               SCpnt->host->host_failed,
-                                                        SCpnt->result));
-
-                               scsi_finish_command(SCpnt);
-                               break;
-                       case NEEDS_RETRY:
-                               /*
-                                * We only come in here if we want to retry a command.  The
-                                * test to see whether the command should be retried should be
-                                * keeping track of the number of tries, so we don't end up looping,
-                                * of course.
-                                */
-                               SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
-                               SCpnt->host->host_failed, SCpnt->result));
+    Scsi_Cmnd *SCpnt;
+    Scsi_Cmnd *SCnext;
+    unsigned long flags;
 
-                               scsi_retry_command(SCpnt);
-                               break;
-                       case ADD_TO_MLQUEUE:
-                               /* 
-                                * This typically happens for a QUEUE_FULL message -
-                                * typically only when the queue depth is only
-                                * approximate for a given device.  Adding a command
-                                * to the queue for the device will prevent further commands
-                                * from being sent to the device, so we shouldn't end up
-                                * with tons of things being sent down that shouldn't be.
-                                */
-                               SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as device queue full, put on ml queue %p\n",
-                                                              SCpnt));
-                               scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
-                               break;
-                       default:
-                               /*
-                                * Here we have a fatal error of some sort.  Turn it over to
-                                * the error handler.
-                                */
-                               SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n",
-                                                   SCpnt, SCpnt->result,
-                                 atomic_read(&SCpnt->host->host_active),
-                                                 SCpnt->host->host_busy,
-                                             SCpnt->host->host_failed));
-
-                               /*
-                                * Dump the sense information too.
-                                */
-                               if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
-                                       SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
-                               }
-                               if (SCpnt->host->eh_wait != NULL) {
-                                       SCpnt->host->host_failed++;
-                                       SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
-                                       SCpnt->state = SCSI_STATE_FAILED;
-                                       SCpnt->host->in_recovery = 1;
-                                       /*
-                                        * If the host is having troubles, then look to see if this was the last
-                                        * command that might have failed.  If so, wake up the error handler.
-                                        */
-                                       if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
-                                               SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
-                                                                                 atomic_read(&SCpnt->host->eh_wait->count)));
-                                               up(SCpnt->host->eh_wait);
-                                       }
-                               } else {
-                                       /*
-                                        * We only get here if the error recovery thread has died.
-                                        */
-                                       scsi_finish_command(SCpnt);
-                               }
-                       }
-               }               /* for(; SCpnt...) */
 
-       }                       /* while(1==1) */
+    while (1 == 1) {
+        spin_lock_irqsave(&scsi_bhqueue_lock, flags);
+        SCpnt = scsi_bh_queue_head;
+        scsi_bh_queue_head = NULL;
+        spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
 
+        if (SCpnt == NULL) {
+            return;
+        }
+        SCnext = SCpnt->bh_next;
+
+        for (; SCpnt; SCpnt = SCnext) {
+            SCnext = SCpnt->bh_next;
+
+            switch (scsi_decide_disposition(SCpnt)) {
+            case SUCCESS:
+                /*
+                 * Add to BH queue.
+                 */
+                SCSI_LOG_MLCOMPLETE(3, 
+                                    printk("Command finished %d %d 0x%x\n", 
+                                           SCpnt->host->host_busy,
+                                           SCpnt->host->host_failed,
+                                           SCpnt->result));
+                
+                scsi_finish_command(SCpnt);
+                break;
+            case NEEDS_RETRY:
+                /*
+                 * We only come in here if we want to retry a command.
+                 * The test to see whether the command should be
+                 * retried should be keeping track of the number of
+                 * tries, so we don't end up looping, of course.  */
+                SCSI_LOG_MLCOMPLETE(3, 
+                                    printk("Command needs retry %d %d 0x%x\n",
+                                           SCpnt->host->host_busy, 
+                                           SCpnt->host->host_failed, 
+                                           SCpnt->result));
+
+                scsi_retry_command(SCpnt);
+                break;
+            case ADD_TO_MLQUEUE:
+                /* 
+                 * This typically happens for a QUEUE_FULL message -
+                 * typically only when the queue depth is only
+                 * approximate for a given device.  Adding a command
+                 * to the queue for the device will prevent further commands
+                 * from being sent to the device, so we shouldn't end up
+                 * with tons of things being sent down that shouldn't be.
+                 */
+                SCSI_LOG_MLCOMPLETE(3, printk(
+                    "Cmnd rejected as device queue full, put on ml queue %p\n",
+                    SCpnt));
+                scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
+                break;
+            default:
+                /*
+                 * Here we have a fatal error of some sort.  Turn it over to
+                 * the error handler.
+                 */
+                SCSI_LOG_MLCOMPLETE(3, printk(
+                    "Command failed %p %x active=%d busy=%d failed=%d\n",
+                    SCpnt, SCpnt->result,
+                    atomic_read(&SCpnt->host->host_active),
+                    SCpnt->host->host_busy,
+                    SCpnt->host->host_failed));
+                
+                /*
+                 * Dump the sense information too.
+                 */
+                if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
+                    SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
+                }
+                if (SCpnt->host->eh_wait != NULL) {
+                    SCpnt->host->host_failed++;
+                    SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
+                    SCpnt->state = SCSI_STATE_FAILED;
+                    SCpnt->host->in_recovery = 1;
+                    /*
+                     * If the host is having troubles, then look to
+                     * see if this was the last command that might
+                     * have failed.  If so, wake up the error handler.  */
+                    if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
+                        SCSI_LOG_ERROR_RECOVERY(5, printk(
+                            "Waking error handler thread (%d)\n",
+                            atomic_read(&SCpnt->host->eh_wait->count)));
+#if 0
+                        up(SCpnt->host->eh_wait);
+#endif
+                    }
+                } else {
+                    /*
+                     * We only get here if the error recovery thread has died.
+                     */
+                    printk("scsi_bh: error finish\n"); 
+                    scsi_finish_command(SCpnt);
+                }
+            }
+        }              /* for(; SCpnt...) */
+        
+    }                  /* while(1==1) */
+    
 }
 
 /*
@@ -1356,68 +1441,77 @@ int scsi_retry_command(Scsi_Cmnd * SCpnt)
  */
 void scsi_finish_command(Scsi_Cmnd * SCpnt)
 {
-       struct Scsi_Host *host;
-       Scsi_Device *device;
-       Scsi_Request * SRpnt;
-       unsigned long flags;
-
-       ASSERT_LOCK(&io_request_lock, 0);
-
-       host = SCpnt->host;
-       device = SCpnt->device;
-
-        /*
-         * We need to protect the decrement, as otherwise a race condition
-         * would exist.  Fiddling with SCpnt isn't a problem as the
-         * design only allows a single SCpnt to be active in only
-         * one execution context, but the device and host structures are
-         * shared.
-         */
-       spin_lock_irqsave(&io_request_lock, flags);
-       host->host_busy--;      /* Indicate that we are free */
-       device->device_busy--;  /* Decrement device usage counter. */
-       spin_unlock_irqrestore(&io_request_lock, flags);
-
-        /*
-         * Clear the flags which say that the device/host is no longer
-         * capable of accepting new commands.  These are set in scsi_queue.c
-         * for both the queue full condition on a device, and for a
-         * host full condition on the host.
-         */
-        host->host_blocked = FALSE;
-        device->device_blocked = FALSE;
-
-       /*
-        * If we have valid sense information, then some kind of recovery
-        * must have taken place.  Make a note of this.
-        */
-       if (scsi_sense_valid(SCpnt)) {
-               SCpnt->result |= (DRIVER_SENSE << 24);
-       }
-       SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
-                                     SCpnt->device->id, SCpnt->result));
-
-       SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
-       SCpnt->state = SCSI_STATE_FINISHED;
-
-       /* We can get here with use_sg=0, causing a panic in the upper level (DB) */
-       SCpnt->use_sg = SCpnt->old_use_sg;
+    struct Scsi_Host *host;
+    Scsi_Device *device;
+    Scsi_Request * SRpnt;
+    unsigned long flags;
+
+    ASSERT_LOCK(&io_request_lock, 0);
+
+    host = SCpnt->host;
+    device = SCpnt->device;
+
+    /*
+     * We need to protect the decrement, as otherwise a race condition
+     * would exist.  Fiddling with SCpnt isn't a problem as the
+     * design only allows a single SCpnt to be active in only
+     * one execution context, but the device and host structures are
+     * shared.
+     */
+    spin_lock_irqsave(&io_request_lock, flags);
+    host->host_busy--; /* Indicate that we are free */
+    device->device_busy--;     /* Decrement device usage counter. */
+    spin_unlock_irqrestore(&io_request_lock, flags);
+    
+    /*
+     * Clear the flags which say that the device/host is no longer
+     * capable of accepting new commands.  These are set in scsi_queue.c
+     * for both the queue full condition on a device, and for a
+     * host full condition on the host.
+     */
+    host->host_blocked = FALSE;
+    device->device_blocked = FALSE;
+    
+    /*
+     * If we have valid sense information, then some kind of recovery
+     * must have taken place.  Make a note of this.
+     */
+    if (scsi_sense_valid(SCpnt)) {
+        SCpnt->result |= (DRIVER_SENSE << 24);
+    }
+    SCSI_LOG_MLCOMPLETE(3, printk(
+        "Notifying upper driver of completion for device %d %x\n",
+        SCpnt->device->id, SCpnt->result));
 
-       /*
-       * If there is an associated request structure, copy the data over before we call the
-       * completion function.
-       */
-       SRpnt = SCpnt->sc_request;
-       if( SRpnt != NULL ) {
-              SRpnt->sr_result = SRpnt->sr_command->result;
-              if( SRpnt->sr_result != 0 ) {
-                      memcpy(SRpnt->sr_sense_buffer,
-                             SRpnt->sr_command->sense_buffer,
-                             sizeof(SRpnt->sr_sense_buffer));
-              }
-       }
+    SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
+    SCpnt->state = SCSI_STATE_FINISHED;
+    
+    /* We can get here with use_sg=0, causing a panic in the 
+       upper level (DB) */
+    SCpnt->use_sg = SCpnt->old_use_sg;
+
+    /*
+     * If there is an associated request structure, copy the data over 
+     * before we call the * completion function.
+     */
+    SRpnt = SCpnt->sc_request;
+
+    if( SRpnt != NULL ) {
+        if(!SRpnt->sr_command) { 
+            printk("scsi_finish_command: SRpnt=%p, SRpnt->sr_command=%p\n", 
+                   SRpnt, SRpnt->sr_command); 
+            printk("SRpnt->freeaddr = %p\n", SRpnt->freeaddr); 
+            BUG(); 
+        }
+        SRpnt->sr_result = SRpnt->sr_command->result;
+        if( SRpnt->sr_result != 0 ) {
+            memcpy(SRpnt->sr_sense_buffer,
+                   SRpnt->sr_command->sense_buffer,
+                   sizeof(SRpnt->sr_sense_buffer));
+        }
+    }
 
-       SCpnt->done(SCpnt);
+    SCpnt->done(SCpnt);
 }
 
 static int scsi_register_host(Scsi_Host_Template *);
@@ -1826,7 +1920,9 @@ static int proc_scsi_gen_write(struct file * file, const char * buf,
                         */
                         if (HBA_ptr->hostt->revoke)
                                 HBA_ptr->hostt->revoke(scd);
+#ifdef DEVFS_MUST_DIE
                        devfs_unregister (scd->de);
+#endif
                        scsi_release_commandblocks(scd);
 
                        /* Now we can remove the device structure */
@@ -1859,164 +1955,168 @@ out:
  */
 static int scsi_register_host(Scsi_Host_Template * tpnt)
 {
-       int pcount;
-       struct Scsi_Host *shpnt;
-       Scsi_Device *SDpnt;
-       struct Scsi_Device_Template *sdtpnt;
-       const char *name;
-       unsigned long flags;
-       int out_of_space = 0;
-
-       if (tpnt->next || !tpnt->detect)
-               return 1;       /* Must be already loaded, or
-                                * no detect routine available
-                                */
-
-       /* If max_sectors isn't set, default to max */
-       if (!tpnt->max_sectors)
-               tpnt->max_sectors = MAX_SECTORS;
-
-       pcount = next_scsi_host;
-
-       MOD_INC_USE_COUNT;
-
-       /* The detect routine must carefully spinunlock/spinlock if 
-          it enables interrupts, since all interrupt handlers do 
-          spinlock as well.
-          All lame drivers are going to fail due to the following 
-          spinlock. For the time beeing let's use it only for drivers 
-          using the new scsi code. NOTE: the detect routine could
-          redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
+    int pcount;
+    struct Scsi_Host *shpnt;
+    Scsi_Device *SDpnt;
+    struct Scsi_Device_Template *sdtpnt;
+    const char *name;
+    unsigned long flags;
+    int out_of_space = 0;
+
+    if (tpnt->next || !tpnt->detect)
+        return 1;      /* Must be already loaded, or
+                         * no detect routine available
+                         */
 
-       if (tpnt->use_new_eh_code) {
-               spin_lock_irqsave(&io_request_lock, flags);
-               tpnt->present = tpnt->detect(tpnt);
-               spin_unlock_irqrestore(&io_request_lock, flags);
-       } else
-               tpnt->present = tpnt->detect(tpnt);
-
-       if (tpnt->present) {
-               if (pcount == next_scsi_host) {
-                       if (tpnt->present > 1) {
-                               printk(KERN_ERR "scsi: Failure to register low-level scsi driver");
-                               scsi_unregister_host(tpnt);
-                               return 1;
-                       }
-                       /* 
-                        * The low-level driver failed to register a driver.
-                        * We can do this now.
-                        */
-                       if(scsi_register(tpnt, 0)==NULL)
-                       {
-                               printk(KERN_ERR "scsi: register failed.\n");
-                               scsi_unregister_host(tpnt);
-                               return 1;
-                       }
-               }
-               tpnt->next = scsi_hosts;        /* Add to the linked list */
-               scsi_hosts = tpnt;
+    /* If max_sectors isn't set, default to max */
+    if (!tpnt->max_sectors)
+        tpnt->max_sectors = MAX_SECTORS;
+
+    pcount = next_scsi_host;
+
+    MOD_INC_USE_COUNT;
+
+    /* The detect routine must carefully spinunlock/spinlock if 
+       it enables interrupts, since all interrupt handlers do 
+       spinlock as well.
+       All lame drivers are going to fail due to the following 
+       spinlock. For the time beeing let's use it only for drivers 
+       using the new scsi code. NOTE: the detect routine could
+       redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
+
+    if (tpnt->use_new_eh_code) {
+        spin_lock_irqsave(&io_request_lock, flags);
+        tpnt->present = tpnt->detect(tpnt);
+        spin_unlock_irqrestore(&io_request_lock, flags);
+    } else
+        tpnt->present = tpnt->detect(tpnt);
+
+    if (tpnt->present) {
+        if (pcount == next_scsi_host) {
+            if (tpnt->present > 1) {
+                printk(KERN_ERR "scsi: Failure to register low-level "
+                       "scsi driver");
+                scsi_unregister_host(tpnt);
+                return 1;
+            }
+            /* 
+             * The low-level driver failed to register a driver.
+             * We can do this now.
+             */
+            if(scsi_register(tpnt, 0)==NULL)
+            {
+                printk(KERN_ERR "scsi: register failed.\n");
+                scsi_unregister_host(tpnt);
+                return 1;
+            }
+        }
+        tpnt->next = scsi_hosts;       /* Add to the linked list */
+        scsi_hosts = tpnt;
 
-               /* Add the new driver to /proc/scsi */
+        /* Add the new driver to /proc/scsi */
 #ifdef CONFIG_PROC_FS
-               build_proc_dir_entries(tpnt);
+        build_proc_dir_entries(tpnt);
 #endif
 
 
-               /*
-                * Add the kernel threads for each host adapter that will
-                * handle error correction.
-                */
-               for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
-                       if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
-                               DECLARE_MUTEX_LOCKED(sem);
+#if 0
+        /*
+         * Add the kernel threads for each host adapter that will
+         * handle error correction.
+         */
+        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+            if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
+                DECLARE_MUTEX_LOCKED(sem);
 
-                               shpnt->eh_notify = &sem;
-                               kernel_thread((int (*)(void *)) scsi_error_handler,
-                                             (void *) shpnt, 0);
+                shpnt->eh_notify = &sem;
+                kernel_thread((int (*)(void *)) scsi_error_handler,
+                              (void *) shpnt, 0);
 
                                /*
                                 * Now wait for the kernel error thread to initialize itself
                                 * as it might be needed when we scan the bus.
                                 */
-                               down(&sem);
-                               shpnt->eh_notify = NULL;
-                       }
-               }
+                down(&sem);
+                shpnt->eh_notify = NULL;
+            }
+        }
+#endif
 
-               for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
-                       if (shpnt->hostt == tpnt) {
-                               if (tpnt->info) {
-                                       name = tpnt->info(shpnt);
-                               } else {
-                                       name = tpnt->name;
-                               }
-                               printk(KERN_INFO "scsi%d : %s\n",               /* And print a little message */
-                                      shpnt->host_no, name);
-                       }
-               }
+        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+            if (shpnt->hostt == tpnt) {
+                if (tpnt->info) {
+                    name = tpnt->info(shpnt);
+                } else {
+                    name = tpnt->name;
+                }
+                printk(KERN_INFO "scsi%d : %s\n",              /* And print a little message */
+                       shpnt->host_no, name);
+            }
+        }
 
-               /* The next step is to call scan_scsis here.  This generates the
-                * Scsi_Devices entries
-                */
-               for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
-                       if (shpnt->hostt == tpnt) {
-                               scan_scsis(shpnt, 0, 0, 0, 0);
-                               if (shpnt->select_queue_depths != NULL) {
-                                       (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
-                               }
-                       }
-               }
+        /* The next step is to call scan_scsis here.  This generates the
+         * Scsi_Devices entries
+         */
+        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+            if (shpnt->hostt == tpnt) {
+                scan_scsis(shpnt, 0, 0, 0, 0);
+                if (shpnt->select_queue_depths != NULL) {
+                    (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
+                }
+            }
+        }
 
-               for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
-                       if (sdtpnt->init && sdtpnt->dev_noticed)
-                               (*sdtpnt->init) ();
-               }
+        for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
+            if (sdtpnt->init && sdtpnt->dev_noticed)
+                (*sdtpnt->init) ();
+        }
 
-               /*
-                * Next we create the Scsi_Cmnd structures for this host 
-                */
-               for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
-                       for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
-                               if (SDpnt->host->hostt == tpnt) {
-                                       for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
-                                               if (sdtpnt->attach)
-                                                       (*sdtpnt->attach) (SDpnt);
-                                       if (SDpnt->attached) {
-                                               scsi_build_commandblocks(SDpnt);
-                                               if (0 == SDpnt->has_cmdblocks)
-                                                       out_of_space = 1;
-                                       }
-                               }
-               }
+        /*
+         * Next we create the Scsi_Cmnd structures for this host 
+         */
+        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+            for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
+                if (SDpnt->host->hostt == tpnt) {
+                    for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+                        if (sdtpnt->attach)
+                            (*sdtpnt->attach) (SDpnt);
+                    if (SDpnt->attached) {
+                        scsi_build_commandblocks(SDpnt);
+                        if (0 == SDpnt->has_cmdblocks)
+                            out_of_space = 1;
+                    }
+                }
+        }
 
-               /*
-                * Now that we have all of the devices, resize the DMA pool,
-                * as required.  */
-               if (!out_of_space)
-                       scsi_resize_dma_pool();
+        /*
+         * Now that we have all of the devices, resize the DMA pool,
+         * as required.  */
+        if (!out_of_space)
+            scsi_resize_dma_pool();
 
 
-               /* This does any final handling that is required. */
-               for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
-                       if (sdtpnt->finish && sdtpnt->nr_dev) {
-                               (*sdtpnt->finish) ();
-                       }
-               }
-       }
+        /* This does any final handling that is required. */
+        for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
+            if (sdtpnt->finish && sdtpnt->nr_dev) {
+                (*sdtpnt->finish) ();
+            }
+        }
+    }
 #if defined(USE_STATIC_SCSI_MEMORY)
-       printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
-              (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
-              (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
-              (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
+    printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
+           (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
+           (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
+           (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
 #endif
 
-       if (out_of_space) {
-               scsi_unregister_host(tpnt);     /* easiest way to clean up?? */
-               return 1;
-       } else
-               return 0;
+    if (out_of_space) {
+        scsi_unregister_host(tpnt);    /* easiest way to clean up?? */
+        return 1;
+    } else
+        return 0;
 }
 
+
 /*
  * Similarly, this entry point should be called by a loadable module if it
  * is trying to remove a low level scsi driver from the system.
@@ -2033,8 +2133,10 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
        struct Scsi_Host *shpnt;
        char name[10];  /* host_no>=10^9? I don't think so. */
 
+#if 0
        /* get the big kernel lock, so we don't race with open() */
        lock_kernel();
+#endif
 
        /*
         * First verify that this host adapter is completely free with no pending
@@ -2126,10 +2228,13 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
                                printk(KERN_ERR "Attached usage count = %d\n", SDpnt->attached);
                                goto err_out;
                        }
+#ifdef DEVFS_MUST_DIE
                        devfs_unregister (SDpnt->de);
+#endif
                }
        }
 
+#if 0
        /*
         * Next, kill the kernel error recovery thread for this host.
         */
@@ -2145,6 +2250,7 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
                        shpnt->eh_notify = NULL;
                }
        }
+#endif
 
        /* Next we free up the Scsi_Cmnd structures for this host */
 
@@ -2175,7 +2281,9 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
                pcount = next_scsi_host;
                /* Remove the /proc/scsi directory entry */
                sprintf(name,"%d",shpnt->host_no);
+#ifdef CONFIG_PROC_FS
                remove_proc_entry(name, tpnt->proc_dir);
+#endif
                if (tpnt->release)
                        (*tpnt->release) (shpnt);
                else {
@@ -2185,8 +2293,11 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
                         */
                        if (shpnt->irq)
                                free_irq(shpnt->irq, NULL);
+
+#if 0
                        if (shpnt->dma_channel != 0xff)
                                free_dma(shpnt->dma_channel);
+#endif
                        if (shpnt->io_port && shpnt->n_io_port)
                                release_region(shpnt->io_port, shpnt->n_io_port);
                }
@@ -2225,7 +2336,9 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
                while ((SHT = *SHTp) != NULL) {
                        if (SHT == tpnt) {
                                *SHTp = SHT->next;
+#ifdef CONFIG_PROC_FS
                                remove_proc_entry(tpnt->proc_name, proc_scsi);
+#endif
                                break;
                        }
                        SHTp = &SHT->next;
@@ -2233,11 +2346,16 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
        }
        MOD_DEC_USE_COUNT;
 
+#if 0
        unlock_kernel();
+#endif
        return 0;
 
 err_out:
+
+#if 0
        unlock_kernel();
+#endif
        return -1;
 }
 
@@ -2331,7 +2449,9 @@ static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
        Scsi_Device *SDpnt;
        struct Scsi_Host *shpnt;
 
+#if 0
        lock_kernel();
+#endif
        /*
         * If we are busy, this is not going to fly.
         */
@@ -2364,14 +2484,19 @@ static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
        scsi_deregister_device(tpnt);
 
        MOD_DEC_USE_COUNT;
+#if 0
        unlock_kernel();
+#endif
+
        /*
         * Final cleanup for the driver is done in the driver sources in the
         * cleanup function.
         */
        return 0;
 error_out:
+#if 0
        unlock_kernel();
+#endif
        return -1;
 }
 
@@ -2571,21 +2696,33 @@ int __init scsi_setup(char *str)
 __setup("scsihosts=", scsi_setup);
 #endif
 
+static spinlock_t slock2 = SPIN_LOCK_UNLOCKED; 
+
 static int __init init_scsi(void)
 {
+#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *generic;
+#endif
 
        printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
 
+        {
+            unsigned long flags; 
+            
+            spin_lock_irqsave(&slock2, flags); 
+            spin_unlock_irqrestore(&slock2, flags); 
+            printk("SCSI start of day -- flags = %lx\n", flags); 
+        }
+
         if( scsi_init_minimal_dma_pool() != 0 )
         {
                 return 1;
         }
 
+#ifdef CONFIG_PROC_FS
        /*
         * This makes /proc/scsi and /proc/scsi/scsi visible.
         */
-#ifdef CONFIG_PROC_FS
        proc_scsi = proc_mkdir("scsi", 0);
        if (!proc_scsi) {
                printk (KERN_ERR "cannot init /proc/scsi\n");
@@ -2600,7 +2737,9 @@ static int __init init_scsi(void)
        generic->write_proc = proc_scsi_gen_write;
 #endif
 
+#ifdef DEVFS_MUST_DIE
         scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
+#endif
         if (scsihosts)
                printk(KERN_INFO "scsi: host order: %s\n", scsihosts);  
        scsi_host_no_init (scsihosts);
@@ -2610,6 +2749,15 @@ static int __init init_scsi(void)
         */
        init_bh(SCSI_BH, scsi_bottom_half_handler);
 
+        {
+            unsigned long flags; 
+            
+            spin_lock_irqsave(&slock2, flags); 
+            spin_unlock_irqrestore(&slock2, flags); 
+            printk("SCSI end of day -- flags = %lx\n", flags); 
+        }
+
+
        return 0;
 }
 
@@ -2619,7 +2767,9 @@ static void __exit exit_scsi(void)
 
        remove_bh(SCSI_BH);
 
+#ifdef DEVFS_MUST_DIE
         devfs_unregister (scsi_devfs_handle);
+#endif
         for (shn = scsi_host_no_list;shn;shn = shn->next) {
                if (shn->name)
                        kfree(shn->name);
@@ -2692,10 +2842,12 @@ Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
 
        SDpnt->online = TRUE;
 
+#if 0
         /*
          * Initialize the object that we will use to wait for command blocks.
          */
        init_waitqueue_head(&SDpnt->scpnt_wait);
+#endif
         return SDpnt;
 }
 
@@ -2814,11 +2966,13 @@ scsi_reset_provider(Scsi_Device *dev, int flag)
        if (dev->host->hostt->use_new_eh_code) {
                rtn = scsi_new_reset(SCpnt, flag);
        } else {
+#if 0
                unsigned long flags;
 
                spin_lock_irqsave(&io_request_lock, flags);
                rtn = scsi_old_reset(SCpnt, flag);
                spin_unlock_irqrestore(&io_request_lock, flags);
+#endif
        }
 
        scsi_delete_timer(SCpnt);
index f8199d5274f1d8c2cf2ecb91d3b4358db63115cc..338bca8f7b9e4758d756db8bbe0b1fc1bed270ca 100644 (file)
@@ -558,8 +558,11 @@ struct scsi_device {
         */
        struct scsi_device *next;       /* Used for linked list */
        struct scsi_device *prev;       /* Used for linked list */
+#if 0
        wait_queue_head_t   scpnt_wait; /* Used to wait if
                                           device is busy */
+#endif
+
        struct Scsi_Host *host;
        request_queue_t request_queue;
         atomic_t                device_active; /* commands checked out for device */
@@ -580,7 +583,9 @@ struct scsi_device {
        int access_count;       /* Count of open channels/mounts */
 
        void *hostdata;         /* available to low-level driver */
+#if 0
        devfs_handle_t de;      /* directory for the device      */
+#endif
        char type;
        char scsi_level;
        char vendor[8], model[16], rev[4];
@@ -650,29 +655,32 @@ typedef struct scsi_pointer {
  * of the queue and being sent to the driver.
  */
 struct scsi_request {
-       int     sr_magic;
-       int     sr_result;      /* Status code from lower level driver */
-       unsigned char sr_sense_buffer[SCSI_SENSE_BUFFERSIZE];           /* obtained by REQUEST SENSE
-                                                * when CHECK CONDITION is
-                                                * received on original command 
-                                                * (auto-sense) */
-
-       struct Scsi_Host *sr_host;
-       Scsi_Device *sr_device;
-       Scsi_Cmnd *sr_command;
-       struct request sr_request;      /* A copy of the command we are
+    int     sr_magic;
+    int     sr_result; /* Status code from lower level driver */
+    unsigned char sr_sense_buffer[SCSI_SENSE_BUFFERSIZE]; 
+    /* obtained by REQUEST SENSE when CHECK CONDITION is received 
+       on original command (auto-sense) */
+    
+    struct Scsi_Host *sr_host;
+    Scsi_Device *sr_device;
+    Scsi_Cmnd *sr_command;
+#define SMHHACK
+#ifdef SMHHACK 
+    void *freeaddr; 
+#endif
+    struct request sr_request; /* A copy of the command we are
                                   working on */
-       unsigned sr_bufflen;    /* Size of data buffer */
-       void *sr_buffer;                /* Data buffer */
-       int sr_allowed;
-       unsigned char sr_data_direction;
-       unsigned char sr_cmd_len;
-       unsigned char sr_cmnd[MAX_COMMAND_SIZE];
-       void (*sr_done) (struct scsi_cmnd *);   /* Mid-level done function */
-       int sr_timeout_per_command;
-       unsigned short sr_use_sg;       /* Number of pieces of scatter-gather */
-       unsigned short sr_sglist_len;   /* size of malloc'd scatter-gather list */
-       unsigned sr_underflow;  /* Return error if less than
+    unsigned sr_bufflen;       /* Size of data buffer */
+    void *sr_buffer;           /* Data buffer */
+    int sr_allowed;
+    unsigned char sr_data_direction;
+    unsigned char sr_cmd_len;
+    unsigned char sr_cmnd[MAX_COMMAND_SIZE];
+    void (*sr_done) (struct scsi_cmnd *);      /* Mid-level done function */
+    int sr_timeout_per_command;
+    unsigned short sr_use_sg;  /* Number of pieces of scatter-gather */
+    unsigned short sr_sglist_len;      /* size of malloc'd scatter-gather list */
+    unsigned sr_underflow;     /* Return error if less than
                                   this amount is transferred */
 };
 
@@ -830,6 +838,7 @@ struct scsi_cmnd {
 #define SCSI_MLQUEUE_HOST_BUSY   0x1055
 #define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
 
+#if 0
 #define SCSI_SLEEP(QUEUE, CONDITION) {             \
     if (CONDITION) {                               \
        DECLARE_WAITQUEUE(wait, current);           \
@@ -848,6 +857,12 @@ struct scsi_cmnd {
        remove_wait_queue(QUEUE, &wait);\
        current->state = TASK_RUNNING;  \
     }; }
+#else
+#define SCSI_SLEEP(QUEUE, CONDITION) { printk("SCSI_SLEEP!\n"); BUG(); } 
+#endif
+
+
+
 
 /*
  * old style reset request from external source
index 559482861615397ef9a65ee8d3efbb3821da3bac..94c2118da0f03547e19a2660b35368c8e2556718 100644 (file)
@@ -6,9 +6,9 @@
  */
 
 #define __NO_VERSION__
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/blk.h>
+#include <xeno/config.h>
+#include <xeno/module.h>
+#include <xeno/blk.h>
 
 
 #include "scsi.h"
@@ -165,6 +165,7 @@ int scsi_free(void *obj, unsigned int len)
                }
        }
        panic("scsi_free:Bad offset");
+       return -1; 
 }
 
 
@@ -206,7 +207,8 @@ void scsi_resize_dma_pool(void)
                 * Free up the DMA pool.
                 */
                if (scsi_dma_free_sectors != dma_sectors)
-                       panic("SCSI DMA pool memory leak %d %d\n", scsi_dma_free_sectors, dma_sectors);
+                       panic("SCSI DMA pool memory leak %d %d\n", 
+                             scsi_dma_free_sectors, dma_sectors);
 
                for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
                        free_pages((unsigned long) dma_malloc_pages[i], 0);
@@ -225,9 +227,11 @@ void scsi_resize_dma_pool(void)
 
        new_dma_sectors = 2 * SECTORS_PER_PAGE;         /* Base value we use */
 
+#if 0 
        if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
                need_isa_bounce_buffers = 1;
        else
+#endif
                need_isa_bounce_buffers = 0;
 
        if (scsi_devicelist)
index e4a69bc8358f5ba9b781b8b9ab690ef9c579f243..6c043937bed929ea311813fd347401ff5ecd500e 100644 (file)
@@ -9,23 +9,23 @@
  */
 
 #define __NO_VERSION__
-#include <linux/module.h>
-
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/stat.h>
-#include <linux/blk.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/smp_lock.h>
+#include <xeno/module.h>
+
+#include <xeno/sched.h>
+#include <xeno/timer.h>
+/*#include <xeno/string.h>*/
+#include <xeno/slab.h>
+#include <xeno/ioport.h>
+#include <xeno/kernel.h>
+/*#include <xeno/stat.h>*/
+#include <xeno/blk.h>
+#include <xeno/interrupt.h>
+#include <xeno/delay.h>
+/*#include <xeno/smp_lock.h>*/
 
 #define __KERNEL_SYSCALLS__
 
-#include <linux/unistd.h>
+/*#include <xeno/unistd.h>*/
 
 #include <asm/system.h>
 #include <asm/irq.h>
@@ -226,6 +226,7 @@ void scsi_times_out(Scsi_Cmnd * SCpnt)
                                   SCpnt->host->host_busy,
                                   SCpnt->host->host_failed));
 
+#if 0
        /*
         * If the host is having troubles, then look to see if this was the last
         * command that might have failed.  If so, wake up the error handler.
@@ -237,6 +238,7 @@ void scsi_times_out(Scsi_Cmnd * SCpnt)
        if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
                up(SCpnt->host->eh_wait);
        }
+#endif
 }
 
 /*
@@ -283,9 +285,11 @@ void scsi_eh_times_out(Scsi_Cmnd * SCpnt)
        SCpnt->eh_state = SCSI_STATE_TIMEOUT;
        SCSI_LOG_ERROR_RECOVERY(5, printk("In scsi_eh_times_out %p\n", SCpnt));
 
+#if 0
        if (SCpnt->host->eh_action != NULL)
                up(SCpnt->host->eh_action);
        else
+#endif
                printk("Missing scsi error handler thread\n");
 }
 
@@ -329,8 +333,10 @@ void scsi_eh_done(Scsi_Cmnd * SCpnt)
        SCSI_LOG_ERROR_RECOVERY(5, printk("In eh_done %p result:%x\n", SCpnt,
                                          SCpnt->result));
 
+#if 0
        if (SCpnt->host->eh_action != NULL)
                up(SCpnt->host->eh_action);
+#endif
 }
 
 /*
@@ -352,9 +358,10 @@ void scsi_eh_action_done(Scsi_Cmnd * SCpnt, int answer)
 
        SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
        SCpnt->eh_state = (answer ? SUCCESS : FAILED);
-
+#if 0
        if (SCpnt->host->eh_action != NULL)
                up(SCpnt->host->eh_action);
+#endif
 }
 
 /*
@@ -548,18 +555,26 @@ STATIC int scsi_test_unit_ready(Scsi_Cmnd * SCpnt)
 STATIC
 void scsi_sleep_done(struct semaphore *sem)
 {
+#if 0
        if (sem != NULL) {
                up(sem);
        }
+#endif
 }
 
 void scsi_sleep(int timeout)
 {
+#if 0 
        DECLARE_MUTEX_LOCKED(sem);
+#endif
        struct timer_list timer;
 
        init_timer(&timer);
+#if 0
        timer.data = (unsigned long) &sem;
+#else 
+        timer.data = 0xDEADBEEF; 
+#endif
        timer.expires = jiffies + timeout;
        timer.function = (void (*)(unsigned long)) scsi_sleep_done;
 
@@ -567,7 +582,9 @@ void scsi_sleep(int timeout)
 
        add_timer(&timer);
 
+#if 0
        down(&sem);
+#endif
        del_timer(&timer);
 }
 
@@ -597,23 +614,29 @@ STATIC void scsi_send_eh_cmnd(Scsi_Cmnd * SCpnt, int timeout)
        SCpnt->owner = SCSI_OWNER_LOWLEVEL;
 
        if (host->can_queue) {
+#if 0
                DECLARE_MUTEX_LOCKED(sem);
+#endif
 
                SCpnt->eh_state = SCSI_STATE_QUEUED;
 
                scsi_add_timer(SCpnt, timeout, scsi_eh_times_out);
 
+#if 0
                /*
                 * Set up the semaphore so we wait for the command to complete.
                 */
                SCpnt->host->eh_action = &sem;
+#endif
                SCpnt->request.rq_status = RQ_SCSI_BUSY;
 
                spin_lock_irqsave(&io_request_lock, flags);
                host->hostt->queuecommand(SCpnt, scsi_eh_done);
                spin_unlock_irqrestore(&io_request_lock, flags);
 
+#if 0
                down(&sem);
+#endif
 
                SCpnt->host->eh_action = NULL;
 
@@ -1246,8 +1269,9 @@ STATIC void scsi_restart_operations(struct Scsi_Host *host)
         * block devices.
         */
        SCSI_LOG_ERROR_RECOVERY(5, printk("scsi_error.c: Waking up host to restart\n"));
-
+#if 0
        wake_up(&host->host_wait);
+#endif
 
        /*
         * Finally we need to re-initiate requests that may be pending.  We will
@@ -1848,6 +1872,7 @@ void scsi_error_handler(void *data)
 {
        struct Scsi_Host *host = (struct Scsi_Host *) data;
        int rtn;
+#if 0
        DECLARE_MUTEX_LOCKED(sem);
 
         /*
@@ -1877,16 +1902,23 @@ void scsi_error_handler(void *data)
        sprintf(current->comm, "scsi_eh_%d", host->host_no);
 
        host->eh_wait = &sem;
+#else
+       host->eh_wait = (void *)0xDEADBEEF; 
+#endif
        host->ehandler = current;
 
+#if 0
        unlock_kernel();
+#endif
 
        /*
         * Wake up the thread that created us.
         */
        SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent %d\n", host->eh_notify->count.counter));
 
+#if 0
        up(host->eh_notify);
+#endif
 
        while (1) {
                /*
@@ -1896,6 +1928,7 @@ void scsi_error_handler(void *data)
                 */
                SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler sleeping\n"));
 
+#if 0
                /*
                 * Note - we always use down_interruptible with the semaphore
                 * even if the module was loaded as part of the kernel.  The
@@ -1906,6 +1939,7 @@ void scsi_error_handler(void *data)
                 * semaphores isn't unreasonable.
                 */
                down_interruptible(&sem);
+#endif
                if( host->loaded_as_module ) {
                        if (signal_pending(current))
                                break;
@@ -1956,6 +1990,7 @@ void scsi_error_handler(void *data)
        host->eh_active = 0;
        host->ehandler = NULL;
 
+#if 0
        /*
         * If anyone is waiting for us to exit (i.e. someone trying to unload
         * a driver), then wake up that process to let them know we are on
@@ -1966,6 +2001,7 @@ void scsi_error_handler(void *data)
         */
        if (host->eh_notify != NULL)
                up(host->eh_notify);
+#endif
 }
 
 /*
index 6f891de66bfb71c082c819c757117b18af78a1da..7f1df6e8f1881d90f8d3bfc90b30aab2a30997ce 100644 (file)
@@ -5,21 +5,21 @@
  *   for the ones that remain
  */
 #define __NO_VERSION__
-#include <linux/module.h>
+#include <xeno/module.h>
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <asm/page.h>
 
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/string.h>
+/*  #include <linux/interrupt.h> */
+/*  #include <linux/errno.h> */
+/*  #include <linux/kernel.h> */
+#include <xeno/sched.h>
+/*  #include <linux/mm.h> */
+/*  #include <linux/string.h> */
 
-#include <linux/blk.h>
+#include <xeno/blk.h>
 #include "scsi.h"
 #include "hosts.h"
 #include <scsi/scsi_ioctl.h>
index 53c1092a5739bf9ca9f4adb3e69f783de7efe352..8c32bf547fd784ab52ae574d79a03a6f41bfc47b 100644 (file)
  */
 
 #define __NO_VERSION__
-#include <linux/module.h>
-
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/stat.h>
-#include <linux/blk.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/smp_lock.h>
-#include <linux/completion.h>
+#include <xeno/module.h>
+
+#include <xeno/sched.h>
+#include <xeno/timer.h>
+/*  #include <xeno/string.h> */
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/ioport.h> */
+/*  #include <xeno/kernel.h> */
+/*  #include <xeno/stat.h> */
+#include <xeno/blk.h>
+/*  #include <xeno/interrupt.h> */
+/*  #include <xeno/delay.h> */
+/*  #include <xeno/smp_lock.h> */
+/*  #include <xeno/completion.h> */
 
 
 #define __KERNEL_SYSCALLS__
 
-#include <linux/unistd.h>
+/* #include <xeno/unistd.h> */
 
 #include <asm/system.h>
 #include <asm/irq.h>
@@ -47,6 +47,8 @@
 #include "constants.h"
 #include <scsi/scsi_ioctl.h>
 
+#define SPECIAL XEN_BLOCK_SPECIAL
+
 /*
  * This entire source file deals with the new queueing code.
  */
 static void __scsi_insert_special(request_queue_t *q, struct request *rq,
                                  void *data, int at_head)
 {
-       unsigned long flags;
-
-       ASSERT_LOCK(&io_request_lock, 0);
-
-       rq->cmd = SPECIAL;
-       rq->special = data;
-       rq->q = NULL;
-       rq->nr_segments = 0;
-       rq->elevator_sequence = 0;
-
-       /*
-        * We have the option of inserting the head or the tail of the queue.
-        * Typically we use the tail for new ioctls and so forth.  We use the
-        * head of the queue for things like a QUEUE_FULL message from a
-        * device, or a host that is unable to accept a particular command.
-        */
-       spin_lock_irqsave(&io_request_lock, flags);
-
-       if (at_head)
-               list_add(&rq->queue, &q->queue_head);
-       else
-               list_add_tail(&rq->queue, &q->queue_head);
-
-       q->request_fn(q);
-       spin_unlock_irqrestore(&io_request_lock, flags);
+    unsigned long flags;
+    
+    ASSERT_LOCK(&io_request_lock, 0);
+    
+    rq->cmd = SPECIAL;
+    rq->special = data;
+    rq->q = NULL;
+    rq->nr_segments = 0;
+    rq->elevator_sequence = 0;
+    
+    /*
+     * We have the option of inserting the head or the tail of the queue.
+     * Typically we use the tail for new ioctls and so forth.  We use the
+     * head of the queue for things like a QUEUE_FULL message from a
+     * device, or a host that is unable to accept a particular command.
+     */
+    spin_lock_irqsave(&io_request_lock, flags);
+    
+    if (at_head)
+       list_add(&rq->queue, &q->queue_head);
+    else
+       list_add_tail(&rq->queue, &q->queue_head);
+    
+    q->request_fn(q);
+    spin_unlock_irqrestore(&io_request_lock, flags);
 }
 
 
@@ -118,10 +120,10 @@ static void __scsi_insert_special(request_queue_t *q, struct request *rq,
  */
 int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
 {
-       request_queue_t *q = &SCpnt->device->request_queue;
-
-       __scsi_insert_special(q, &SCpnt->request, SCpnt, at_head);
-       return 0;
+    request_queue_t *q = &SCpnt->device->request_queue;
+    
+    __scsi_insert_special(q, &SCpnt->request, SCpnt, at_head);
+    return 0;
 }
 
 /*
@@ -146,10 +148,10 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
  */
 int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
 {
-       request_queue_t *q = &SRpnt->sr_device->request_queue;
-
-       __scsi_insert_special(q, &SRpnt->sr_request, SRpnt, at_head);
-       return 0;
+    request_queue_t *q = &SRpnt->sr_device->request_queue;
+    
+    __scsi_insert_special(q, &SRpnt->sr_request, SRpnt, at_head);
+    return 0;
 }
 
 /*
@@ -167,44 +169,44 @@ int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
  */
 int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
 {
-       ASSERT_LOCK(&io_request_lock, 0);
-
-       SCpnt->owner = SCSI_OWNER_MIDLEVEL;
-       SCpnt->reset_chain = NULL;
-       SCpnt->serial_number = 0;
-       SCpnt->serial_number_at_timeout = 0;
-       SCpnt->flags = 0;
-       SCpnt->retries = 0;
-
-       SCpnt->abort_reason = 0;
-
-       memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
-
-       if (SCpnt->cmd_len == 0)
-               SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
-
-       /*
-        * We need saved copies of a number of fields - this is because
-        * error handling may need to overwrite these with different values
-        * to run different commands, and once error handling is complete,
-        * we will need to restore these values prior to running the actual
-        * command.
-        */
-       SCpnt->old_use_sg = SCpnt->use_sg;
-       SCpnt->old_cmd_len = SCpnt->cmd_len;
-       SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
-       SCpnt->old_underflow = SCpnt->underflow;
-       memcpy((void *) SCpnt->data_cmnd,
-              (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd));
-       SCpnt->buffer = SCpnt->request_buffer;
-       SCpnt->bufflen = SCpnt->request_bufflen;
-
-       SCpnt->reset_chain = NULL;
-
-       SCpnt->internal_timeout = NORMAL_TIMEOUT;
-       SCpnt->abort_reason = 0;
-
-       return 1;
+    ASSERT_LOCK(&io_request_lock, 0);
+    
+    SCpnt->owner = SCSI_OWNER_MIDLEVEL;
+    SCpnt->reset_chain = NULL;
+    SCpnt->serial_number = 0;
+    SCpnt->serial_number_at_timeout = 0;
+    SCpnt->flags = 0;
+    SCpnt->retries = 0;
+
+    SCpnt->abort_reason = 0;
+    
+    memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
+    
+    if (SCpnt->cmd_len == 0)
+       SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+    
+    /*
+     * We need saved copies of a number of fields - this is because
+     * error handling may need to overwrite these with different values
+     * to run different commands, and once error handling is complete,
+     * we will need to restore these values prior to running the actual
+     * command.
+     */
+    SCpnt->old_use_sg = SCpnt->use_sg;
+    SCpnt->old_cmd_len = SCpnt->cmd_len;
+    SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
+    SCpnt->old_underflow = SCpnt->underflow;
+    memcpy((void *) SCpnt->data_cmnd,
+          (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd));
+    SCpnt->buffer = SCpnt->request_buffer;
+    SCpnt->bufflen = SCpnt->request_bufflen;
+    
+    SCpnt->reset_chain = NULL;
+    
+    SCpnt->internal_timeout = NORMAL_TIMEOUT;
+    SCpnt->abort_reason = 0;
+
+    return 1;
 }
 
 /*
@@ -245,90 +247,90 @@ int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
  */
 void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
 {
-       int all_clear;
-       unsigned long flags;
-       Scsi_Device *SDpnt;
-       struct Scsi_Host *SHpnt;
-
-       ASSERT_LOCK(&io_request_lock, 0);
-
-       spin_lock_irqsave(&io_request_lock, flags);
-       if (SCpnt != NULL) {
-
-               /*
-                * For some reason, we are not done with this request.
-                * This happens for I/O errors in the middle of the request,
-                * in which case we need to request the blocks that come after
-                * the bad sector.
-                */
-               SCpnt->request.special = (void *) SCpnt;
-               list_add(&SCpnt->request.queue, &q->queue_head);
-       }
-
-       /*
-        * Just hit the requeue function for the queue.
-        */
-       q->request_fn(q);
-
-       SDpnt = (Scsi_Device *) q->queuedata;
-       SHpnt = SDpnt->host;
-
+    int all_clear;
+    unsigned long flags;
+    Scsi_Device *SDpnt;
+    struct Scsi_Host *SHpnt;
+    
+    ASSERT_LOCK(&io_request_lock, 0);
+    
+    spin_lock_irqsave(&io_request_lock, flags);
+    if (SCpnt != NULL) {
+       
        /*
-        * If this is a single-lun device, and we are currently finished
-        * with this device, then see if we need to get another device
-        * started.  FIXME(eric) - if this function gets too cluttered
-        * with special case code, then spin off separate versions and
-        * use function pointers to pick the right one.
+        * For some reason, we are not done with this request.
+        * This happens for I/O errors in the middle of the request,
+        * in which case we need to request the blocks that come after
+        * the bad sector.
         */
-       if (SDpnt->single_lun
-           && list_empty(&q->queue_head)
-           && SDpnt->device_busy == 0) {
-               request_queue_t *q;
-
-               for (SDpnt = SHpnt->host_queue;
-                    SDpnt;
-                    SDpnt = SDpnt->next) {
-                       if (((SHpnt->can_queue > 0)
-                            && (SHpnt->host_busy >= SHpnt->can_queue))
-                           || (SHpnt->host_blocked)
-                           || (SHpnt->host_self_blocked)
-                           || (SDpnt->device_blocked)) {
-                               break;
-                       }
-                       q = &SDpnt->request_queue;
-                       q->request_fn(q);
-               }
+       SCpnt->request.special = (void *) SCpnt;
+       list_add(&SCpnt->request.queue, &q->queue_head);
+    }
+    
+    /*
+     * Just hit the requeue function for the queue.
+     */
+    q->request_fn(q);
+    
+    SDpnt = (Scsi_Device *) q->queuedata;
+    SHpnt = SDpnt->host;
+    
+    /*
+     * If this is a single-lun device, and we are currently finished
+     * with this device, then see if we need to get another device
+     * started.  FIXME(eric) - if this function gets too cluttered
+     * with special case code, then spin off separate versions and
+     * use function pointers to pick the right one.
+     */
+    if (SDpnt->single_lun
+       && list_empty(&q->queue_head)
+       && SDpnt->device_busy == 0) {
+       request_queue_t *q;
+       
+       for (SDpnt = SHpnt->host_queue;
+            SDpnt;
+            SDpnt = SDpnt->next) {
+           if (((SHpnt->can_queue > 0)
+                && (SHpnt->host_busy >= SHpnt->can_queue))
+               || (SHpnt->host_blocked)
+               || (SHpnt->host_self_blocked)
+               || (SDpnt->device_blocked)) {
+               break;
+           }
+           q = &SDpnt->request_queue;
+           q->request_fn(q);
        }
-
-       /*
-        * Now see whether there are other devices on the bus which
-        * might be starved.  If so, hit the request function.  If we
-        * don't find any, then it is safe to reset the flag.  If we
-        * find any device that it is starved, it isn't safe to reset the
-        * flag as the queue function releases the lock and thus some
-        * other device might have become starved along the way.
-        */
-       all_clear = 1;
-       if (SHpnt->some_device_starved) {
-               for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
-                       request_queue_t *q;
-                       if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
-                           || (SHpnt->host_blocked) 
-                           || (SHpnt->host_self_blocked)) {
-                               break;
-                       }
-                       if (SDpnt->device_blocked || !SDpnt->starved) {
-                               continue;
-                       }
-                       q = &SDpnt->request_queue;
-                       q->request_fn(q);
-                       all_clear = 0;
-               }
-               if (SDpnt == NULL && all_clear) {
-                       SHpnt->some_device_starved = 0;
-               }
+    }
+    
+    /*
+     * Now see whether there are other devices on the bus which
+     * might be starved.  If so, hit the request function.  If we
+     * don't find any, then it is safe to reset the flag.  If we
+     * find any device that it is starved, it isn't safe to reset the
+     * flag as the queue function releases the lock and thus some
+     * other device might have become starved along the way.
+     */
+    all_clear = 1;
+    if (SHpnt->some_device_starved) {
+       for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
+           request_queue_t *q;
+           if ((SHpnt->can_queue > 0 &&(SHpnt->host_busy >= SHpnt->can_queue))
+               || (SHpnt->host_blocked) 
+               || (SHpnt->host_self_blocked)) {
+               break;
+           }
+           if (SDpnt->device_blocked || !SDpnt->starved) {
+               continue;
+           }
+           q = &SDpnt->request_queue;
+           q->request_fn(q);
+           all_clear = 0;
+       }
+       if (SDpnt == NULL && all_clear) {
+           SHpnt->some_device_starved = 0;
        }
-       spin_unlock_irqrestore(&io_request_lock, flags);
+    }
+    spin_unlock_irqrestore(&io_request_lock, flags);
 }
 
 /*
@@ -418,6 +420,7 @@ static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
                scsi_queue_next_request(q, SCpnt);
                return SCpnt;
        }
+#if 0
        /*
         * This request is done.  If there is someone blocked waiting for this
         * request, wake them up.  Typically used to wake up processes trying
@@ -426,6 +429,7 @@ static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
        if (req->waiting != NULL) {
                complete(req->waiting);
        }
+#endif
        req_finished_io(req);
        add_blkdev_randomness(MAJOR(req->rq_dev));
 
index 985e74a18c6bd96a6ee6067546baa4dfd9341b77..92306b3ec0a1cda18ec97cc85b2db0ab22644897 100644 (file)
  */
 
 #define __NO_VERSION__
-#include <linux/config.h>
-#include <linux/module.h>
-
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/stat.h>
-#include <linux/blk.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/smp_lock.h>
+#include <xeno/config.h>
+#include <xeno/module.h>
+
+#include <xeno/sched.h>
+#include <xeno/timer.h>
+/*  #include <xeno/string.h> */
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/ioport.h> */
+/*  #include <xeno/kernel.h> */
+/*  #include <xeno/stat.h> */
+#include <xeno/blk.h>
+/*  #include <xeno/interrupt.h> */
+/*  #include <xeno/delay.h> */
+/*  #include <xeno/smp_lock.h> */
 
 
 #define __KERNEL_SYSCALLS__
 
-#include <linux/unistd.h>
+/*  #include <xeno/unistd.h> */
 
 #include <asm/system.h>
 #include <asm/irq.h>
diff --git a/xen-2.4.16/drivers/scsi/scsi_module.c.inc b/xen-2.4.16/drivers/scsi/scsi_module.c.inc
new file mode 100644 (file)
index 0000000..24099e0
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ *  scsi_module.c Copyright (1994, 1995) Eric Youngdale.
+ *
+ * Support for loading low-level scsi drivers using the linux kernel loadable
+ * module interface.
+ *
+ * To use, the host adapter should first define and initialize the variable
+ * driver_template (datatype Scsi_Host_Template), and then include this file.
+ * This should also be wrapped in a #ifdef MODULE/#endif.
+ *
+ * The low -level driver must also define a release function which will
+ * free any irq assignments, release any dma channels, release any I/O
+ * address space that might be reserved, and otherwise clean up after itself.
+ * The idea is that the same driver should be able to be reloaded without
+ * any difficulty.  This makes debugging new drivers easier, as you should
+ * be able to load the driver, test it, unload, modify and reload.
+ *
+ * One *very* important caveat.  If the driver may need to do DMA on the
+ * ISA bus, you must have unchecked_isa_dma set in the device template,
+ * even if this might be changed during the detect routine.  This is
+ * because the shpnt structure will be allocated in a special way so that
+ * it will be below the appropriate DMA limit - thus if your driver uses
+ * the hostdata field of shpnt, and the board must be able to access this
+ * via DMA, the shpnt structure must be in a DMA accessible region of
+ * memory.  This comment would be relevant for something like the buslogic
+ * driver where there are many boards, only some of which do DMA onto the
+ * ISA bus.  There is no convenient way of specifying whether the host
+ * needs to be in a ISA DMA accessible region of memory when you call
+ * scsi_register.
+ */
+
+#include <xeno/module.h>
+#include <linux/init.h>
+
+static int __init init_this_scsi_driver(void)
+{
+       driver_template.module = THIS_MODULE;
+       scsi_register_module(MODULE_SCSI_HA, &driver_template);
+       if (driver_template.present)
+               return 0;
+
+       scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
+       return -ENODEV;
+}
+
+static void __exit exit_this_scsi_driver(void)
+{
+       scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
+}
+
+module_init(init_this_scsi_driver);
+module_exit(exit_this_scsi_driver);
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
index 01d6679250a954ce98ee59bd837294587eba9726..41a5f7cc491d1f6e92f08b839ed509df6206bced 100644 (file)
  * Michael A. Griffith <grif@acm.org>
  */
 
-#include <linux/config.h>      /* for CONFIG_PROC_FS */
+#include <xeno/config.h>       /* for CONFIG_PROC_FS */
 #define __NO_VERSION__
-#include <linux/module.h>
-
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/errno.h>
-#include <linux/stat.h>
-#include <linux/blk.h>
+#include <xeno/module.h>
+
+/*  #include <xeno/string.h> */
+/*  #include <xeno/mm.h> */
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/proc_fs.h> */
+/*  #include <xeno/errno.h> */
+/*  #include <xeno/stat.h> */
+#include <xeno/blk.h>
 
 #include <asm/uaccess.h>
 
index c41f1ce069282407f66bf8ea76a204e33cc97746..ce790c9d11815134e385edcc4ccb96a74108d9cb 100644 (file)
  */
 
 #define __NO_VERSION__
-#include <linux/module.h>
-
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/stat.h>
-#include <linux/blk.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/smp_lock.h>
+#include <xeno/module.h>
+
+#include <xeno/sched.h>
+#include <xeno/timer.h>
+/*  #include <xeno/string.h> */
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/ioport.h> */
+/*  #include <xeno/kernel.h> */
+/*  #include <xeno/stat.h> */
+#include <xeno/blk.h>
+/*  #include <xeno/interrupt.h> */
+/*  #include <xeno/delay.h> */
+/*  #include <xeno/smp_lock.h> */
 
 #define __KERNEL_SYSCALLS__
 
-#include <linux/unistd.h>
+/*#include <xeno/unistd.h>*/
 
 #include <asm/system.h>
 #include <asm/irq.h>
index f6dbf406b1f84d26f578f4823e91c141af56b169..04f4715992bf8dd02022cf711ed01562ad2820f2 100644 (file)
@@ -9,11 +9,11 @@
  */
 
 #define __NO_VERSION__
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
+#include <xeno/config.h>
+#include <xeno/module.h>
+#include <xeno/init.h>
 
-#include <linux/blk.h>
+#include <xeno/blk.h>
 
 #include "scsi.h"
 #include "hosts.h"
@@ -205,6 +205,7 @@ MODULE_PARM_DESC(max_scsi_luns, "last scsi LUN (should be between 1 and 2^32-1)"
 
 static int __init scsi_luns_setup(char *str)
 {
+#if 0
        unsigned int tmp;
 
        if (get_option(&str, &tmp) == 1) {
@@ -215,6 +216,9 @@ static int __init scsi_luns_setup(char *str)
                       "(n should be between 1 and 2^32-1)\n");
                return 0;
        }
+#else
+       return 0;
+#endif
 }
 
 __setup("max_scsi_luns=", scsi_luns_setup);
@@ -343,10 +347,12 @@ void scan_scsis(struct Scsi_Host *shpnt,
 
        initialize_merge_fn(SDpnt);
 
+#if 0
         /*
          * Initialize the object that we will use to wait for command blocks.
          */
        init_waitqueue_head(&SDpnt->scpnt_wait);
+#endif
 
        /*
         * Next, hook the device to the host in question.
@@ -525,7 +531,9 @@ static int scan_scsis_single(unsigned int channel, unsigned int dev,
        Scsi_Device *SDtail, *SDpnt = *SDpnt2;
        Scsi_Request * SRpnt;
        int bflags, type = -1;
+#ifdef DEVFS_MUST_DIE
        extern devfs_handle_t scsi_devfs_handle;
+#endif
        int scsi_level;
 
        SDpnt->host = shpnt;
@@ -686,8 +694,10 @@ static int scan_scsis_single(unsigned int channel, unsigned int dev,
 
         sprintf (devname, "host%d/bus%d/target%d/lun%d",
                  SDpnt->host->host_no, SDpnt->channel, SDpnt->id, SDpnt->lun);
+#ifdef DEVFS_MUST_DIE
         if (SDpnt->de) printk ("DEBUG: dir: \"%s\" already exists\n", devname);
         else SDpnt->de = devfs_mk_dir (scsi_devfs_handle, devname, NULL);
+#endif
 
        for (sdtpnt = scsi_devicelist; sdtpnt;
             sdtpnt = sdtpnt->next)
@@ -796,10 +806,12 @@ static int scan_scsis_single(unsigned int channel, unsigned int dev,
         */
        SDpnt->online = TRUE;
 
+#if 0
         /*
          * Initialize the object that we will use to wait for command blocks.
          */
        init_waitqueue_head(&SDpnt->scpnt_wait);
+#endif
 
        /*
         * Since we just found one device, there had damn well better be one in the list
index 8a25a456ae591738672ab5730d5cdfe7d58e25c8..82b23e84ce60a8c6ae6b84fd52575714ab9ef97a 100644 (file)
@@ -3,17 +3,17 @@
  * a module.
  */
 #define __NO_VERSION__
-#include <linux/config.h>
-#include <linux/module.h>
+#include <xeno/config.h>
+#include <xeno/module.h>
 
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/blk.h>
-#include <linux/fs.h>
+#include <xeno/sched.h>
+#include <xeno/timer.h>
+/*  #include <xeno/string.h> */
+/*  #include <xeno/slab.h> */
+/*  #include <xeno/ioport.h> */
+/*  #include <xeno/kernel.h> */
+#include <xeno/blk.h>
+/* #include <xeno/fs.h> */
 
 #include <asm/system.h>
 #include <asm/irq.h>
index 77c784687943f17fd2625ef4946bdc2f82b9ed4f..ae13d27cdc965a0b10ea9ddb9402818cbc73c01f 100644 (file)
  */
 
 #define __NO_VERSION__
-#include <linux/module.h>
 
-#include <linux/fs.h>
-#include <linux/genhd.h>
-#include <linux/kernel.h>
-#include <linux/blk.h>
+#include <xeno/config.h>
+#include <xeno/module.h>
+
+
+/*#include <linux/fs.h>*/
+/*#include <linux/genhd.h>*/
+#include <xeno/blk.h>
+/*#include <linux/kernel.h>*/
 #include <asm/unaligned.h>
 #include "scsi.h"
 #include "hosts.h"
@@ -47,13 +50,21 @@ int scsicam_bios_param(Disk * disk, /* SCSI disk */
        int size = disk->capacity;
        unsigned long temp_cyl;
 
+#if 0
        if (!(bh = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, block_size(dev))))
                return -1;
+#else 
+       bh = NULL; 
+       printk("scsicam_bios_param: bread not avail!\n"); 
+       BUG(); 
+#endif
 
        /* try to infer mapping from partition table */
        ret_code = scsi_partsize(bh, (unsigned long) size, (unsigned int *) ip + 2,
                       (unsigned int *) ip + 0, (unsigned int *) ip + 1);
+#if 0
        brelse(bh);
+#endif
 
        if (ret_code == -1) {
                /* pick some standard mapping with at most 1024 cylinders,
index 1e0749b9f0ba7f251d9a2e25f8372a4a4fecd082..dbb69d24478cb2a0f0122c62ab3ac63d664d2ef2 100644 (file)
  *       Fix problem where removable media could be ejected after sd_open.
  */
 
-#include <linux/config.h>
-#include <linux/module.h>
+#include <xeno/config.h>
+#include <xeno/module.h>
 
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
+/*  #include <xeno/fs.h> */
+/*  #include <xeno/kernel.h> */
+#include <xeno/sched.h>
+/*  #include <xeno/mm.h> */
+/*  #include <xeno/string.h> */
+#include <xeno/hdreg.h>
+/*  #include <xeno/errno.h> */
+/*  #include <xeno/interrupt.h> */
+#include <xeno/init.h>
 
-#include <linux/smp.h>
+/*  #include <xeno/smp.h> */
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <asm/io.h>
 
 #define MAJOR_NR SCSI_DISK0_MAJOR
-#include <linux/blk.h>
-#include <linux/blkpg.h>
+#include <xeno/blk.h>
+#include <xeno/blkpg.h>
 #include "scsi.h"
 #include "hosts.h"
 #include "sd.h"
@@ -59,7 +59,7 @@
 #include "constants.h"
 #include <scsi/scsicam.h>      /* must follow "hosts.h" */
 
-#include <linux/genhd.h>
+#include <xeno/genhd.h>
 
 /*
  *  static const char RCSid[] = "$Header:";
@@ -185,16 +185,18 @@ static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd,
        
                        diskinfo[0] = 0x40;
                        diskinfo[1] = 0x20;
-                       diskinfo[2] = rscsi_disks[DEVICE_NR(dev)].capacity >> 11;
+                       diskinfo[2] = 
+                           rscsi_disks[DEVICE_NR(dev)].capacity >> 11;
        
-                       /* override with calculated, extended default, or driver values */
+                       /* override with calculated, extended default,
+                           or driver values */
        
                        if(host->hostt->bios_param != NULL)
-                               host->hostt->bios_param(&rscsi_disks[DEVICE_NR(dev)],
-                                           dev,
-                                           &diskinfo[0]);
+                               host->hostt->bios_param(
+                                   &rscsi_disks[DEVICE_NR(dev)], dev,
+                                   &diskinfo[0]);
                        else scsicam_bios_param(&rscsi_disks[DEVICE_NR(dev)],
-                                       dev, &diskinfo[0]);
+                                               dev, &diskinfo[0]);
 
                        if (put_user(diskinfo[0], &loc->heads) ||
                                put_user(diskinfo[1], &loc->sectors) ||
@@ -207,7 +209,8 @@ static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd,
                }
                case HDIO_GETGEO_BIG:
                {
-                       struct hd_big_geometry *loc = (struct hd_big_geometry *) arg;
+                       struct hd_big_geometry *loc = 
+                           (struct hd_big_geometry *) arg;
 
                        if(!loc)
                                return -EINVAL;
@@ -218,26 +221,30 @@ static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd,
 
                        diskinfo[0] = 0x40;
                        diskinfo[1] = 0x20;
-                       diskinfo[2] = rscsi_disks[DEVICE_NR(dev)].capacity >> 11;
+                       diskinfo[2] = 
+                           rscsi_disks[DEVICE_NR(dev)].capacity >> 11;
 
-                       /* override with calculated, extended default, or driver values */
+                       /* override with calculated, extended default,
+                           or driver values */
 
                        if(host->hostt->bios_param != NULL)
-                               host->hostt->bios_param(&rscsi_disks[DEVICE_NR(dev)],
-                                           dev,
-                                           &diskinfo[0]);
+                               host->hostt->bios_param(
+                                   &rscsi_disks[DEVICE_NR(dev)], dev,
+                                   &diskinfo[0]);
                        else scsicam_bios_param(&rscsi_disks[DEVICE_NR(dev)],
-                                       dev, &diskinfo[0]);
+                                               dev, &diskinfo[0]);
 
                        if (put_user(diskinfo[0], &loc->heads) ||
                                put_user(diskinfo[1], &loc->sectors) ||
-                               put_user(diskinfo[2], (unsigned int *) &loc->cylinders) ||
+                               put_user(diskinfo[2], 
+                                        (unsigned int *) &loc->cylinders) ||
                                put_user(sd_gendisks[SD_MAJOR_IDX(
                                    inode->i_rdev)].part[MINOR(
                                    inode->i_rdev)].start_sect, &loc->start))
                                return -EFAULT;
                        return 0;
                }
+#if 0
                case BLKGETSIZE:
                case BLKGETSIZE64:
                case BLKROSET:
@@ -257,314 +264,321 @@ static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd,
                        if (!capable(CAP_SYS_ADMIN))
                                return -EACCES;
                        return revalidate_scsidisk(dev, 1);
+#endif
 
                default:
-                       return scsi_ioctl(rscsi_disks[DEVICE_NR(dev)].device , cmd, (void *) arg);
+                       return scsi_ioctl(rscsi_disks[DEVICE_NR(dev)].device, 
+                                         cmd, (void *) arg);
        }
 }
 
 static void sd_devname(unsigned int disknum, char *buffer)
 {
-       if (disknum < 26)
-               sprintf(buffer, "sd%c", 'a' + disknum);
-       else {
-               unsigned int min1;
-               unsigned int min2;
-               /*
-                * For larger numbers of disks, we need to go to a new
-                * naming scheme.
-                */
-               min1 = disknum / 26;
-               min2 = disknum % 26;
-               sprintf(buffer, "sd%c%c", 'a' + min1 - 1, 'a' + min2);
-       }
+    if (disknum < 26)
+       sprintf(buffer, "sd%c", 'a' + disknum);
+    else {
+       unsigned int min1;
+       unsigned int min2;
+       /*
+        * For larger numbers of disks, we need to go to a new
+        * naming scheme.
+        */
+       min1 = disknum / 26;
+       min2 = disknum % 26;
+       sprintf(buffer, "sd%c%c", 'a' + min1 - 1, 'a' + min2);
+    }
 }
 
 static request_queue_t *sd_find_queue(kdev_t dev)
 {
-       Scsi_Disk *dpnt;
-       int target;
-       target = DEVICE_NR(dev);
-
-       dpnt = &rscsi_disks[target];
-       if (!dpnt->device)
-               return NULL;    /* No such device */
-       return &dpnt->device->request_queue;
+    Scsi_Disk *dpnt;
+    int target;
+    target = DEVICE_NR(dev);
+
+    dpnt = &rscsi_disks[target];
+    if (!dpnt->device)
+       return NULL;    /* No such device */
+    return &dpnt->device->request_queue;
 }
 
 static int sd_init_command(Scsi_Cmnd * SCpnt)
 {
-       int dev, block, this_count;
-       struct hd_struct *ppnt;
-       Scsi_Disk *dpnt;
+    int dev, block, this_count;
+    struct hd_struct *ppnt;
+    Scsi_Disk *dpnt;
 #if CONFIG_SCSI_LOGGING
-       char nbuff[6];
+    char nbuff[6];
 #endif
 
-       ppnt = &sd_gendisks[SD_MAJOR_IDX(SCpnt->request.rq_dev)].part[MINOR(SCpnt->request.rq_dev)];
-       dev = DEVICE_NR(SCpnt->request.rq_dev);
+    ppnt = &sd_gendisks[SD_MAJOR_IDX(SCpnt->request.rq_dev)].part[MINOR(SCpnt->request.rq_dev)];
+    dev = DEVICE_NR(SCpnt->request.rq_dev);
 
-       block = SCpnt->request.sector;
-       this_count = SCpnt->request_bufflen >> 9;
+    block = SCpnt->request.sector;
+    this_count = SCpnt->request_bufflen >> 9;
 
-       SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = 0x%x, block = %d\n",
-           SCpnt->request.rq_dev, block));
-
-       dpnt = &rscsi_disks[dev];
-       if (dev >= sd_template.dev_max ||
-           !dpnt->device ||
-           !dpnt->device->online ||
-           block + SCpnt->request.nr_sectors > ppnt->nr_sects) {
-               SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors));
-               SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
-               return 0;
-       }
-       block += ppnt->start_sect;
-       if (dpnt->device->changed) {
-               /*
-                * quietly refuse to do anything to a changed disc until the changed
-                * bit has been reset
-                */
-               /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
-               return 0;
-       }
-       SCSI_LOG_HLQUEUE(2, sd_devname(dev, nbuff));
-       SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n",
-                                  nbuff, dev, block));
+    SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = 0x%x, block = %d\n",
+                              SCpnt->request.rq_dev, block));
 
+    dpnt = &rscsi_disks[dev];
+    if (dev >= sd_template.dev_max ||
+       !dpnt->device ||
+       !dpnt->device->online ||
+       block + SCpnt->request.nr_sectors > ppnt->nr_sects) {
+       SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 
+                                  SCpnt->request.nr_sectors));
+       SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
+       return 0;
+    }
+    block += ppnt->start_sect;
+    if (dpnt->device->changed) {
        /*
-        * If we have a 1K hardware sectorsize, prevent access to single
-        * 512 byte sectors.  In theory we could handle this - in fact
-        * the scsi cdrom driver must be able to handle this because
-        * we typically use 1K blocksizes, and cdroms typically have
-        * 2K hardware sectorsizes.  Of course, things are simpler
-        * with the cdrom, since it is read-only.  For performance
-        * reasons, the filesystems should be able to handle this
-        * and not force the scsi disk driver to use bounce buffers
-        * for this.
+        * quietly refuse to do anything to a changed disc until the changed
+        * bit has been reset
         */
-       if (dpnt->device->sector_size == 1024) {
-               if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
-                       printk("sd.c:Bad block number requested");
-                       return 0;
-               } else {
-                       block = block >> 1;
-                       this_count = this_count >> 1;
-               }
-       }
-       if (dpnt->device->sector_size == 2048) {
-               if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
-                       printk("sd.c:Bad block number requested");
-                       return 0;
-               } else {
-                       block = block >> 2;
-                       this_count = this_count >> 2;
-               }
-       }
-       if (dpnt->device->sector_size == 4096) {
-               if ((block & 7) || (SCpnt->request.nr_sectors & 7)) {
-                       printk("sd.c:Bad block number requested");
-                       return 0;
-               } else {
-                       block = block >> 3;
-                       this_count = this_count >> 3;
-               }
+       /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+       return 0;
+    }
+    SCSI_LOG_HLQUEUE(2, sd_devname(dev, nbuff));
+    SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n",
+                              nbuff, dev, block));
+
+    /*
+     * If we have a 1K hardware sectorsize, prevent access to single
+     * 512 byte sectors.  In theory we could handle this - in fact
+     * the scsi cdrom driver must be able to handle this because
+     * we typically use 1K blocksizes, and cdroms typically have
+     * 2K hardware sectorsizes.  Of course, things are simpler
+     * with the cdrom, since it is read-only.  For performance
+     * reasons, the filesystems should be able to handle this
+     * and not force the scsi disk driver to use bounce buffers
+     * for this.
+     */
+    if (dpnt->device->sector_size == 1024) {
+       if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
+           printk("sd.c:Bad block number requested");
+           return 0;
+       } else {
+           block = block >> 1;
+           this_count = this_count >> 1;
        }
-       switch (SCpnt->request.cmd) {
-       case WRITE:
-               if (!dpnt->device->writeable) {
-                       return 0;
-               }
-               SCpnt->cmnd[0] = WRITE_6;
-               SCpnt->sc_data_direction = SCSI_DATA_WRITE;
-               break;
-       case READ:
-               SCpnt->cmnd[0] = READ_6;
-               SCpnt->sc_data_direction = SCSI_DATA_READ;
-               break;
-       default:
-               panic("Unknown sd command %d\n", SCpnt->request.cmd);
+    }
+    if (dpnt->device->sector_size == 2048) {
+       if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
+           printk("sd.c:Bad block number requested");
+           return 0;
+       } else {
+           block = block >> 2;
+           this_count = this_count >> 2;
        }
-
-       SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
-                                  nbuff,
-                  (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
-                                this_count, SCpnt->request.nr_sectors));
-
-       SCpnt->cmnd[1] = (SCpnt->device->scsi_level <= SCSI_2) ?
-                        ((SCpnt->lun << 5) & 0xe0) : 0;
-
-       if (((this_count > 0xff) || (block > 0x1fffff)) || SCpnt->device->ten) {
-               if (this_count > 0xffff)
-                       this_count = 0xffff;
-
-               SCpnt->cmnd[0] += READ_10 - READ_6;
-               SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
-               SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
-               SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
-               SCpnt->cmnd[5] = (unsigned char) block & 0xff;
-               SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
-               SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
-               SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+    }
+    if (dpnt->device->sector_size == 4096) {
+       if ((block & 7) || (SCpnt->request.nr_sectors & 7)) {
+           printk("sd.c:Bad block number requested");
+           return 0;
        } else {
-               if (this_count > 0xff)
-                       this_count = 0xff;
-
-               SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
-               SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
-               SCpnt->cmnd[3] = (unsigned char) block & 0xff;
-               SCpnt->cmnd[4] = (unsigned char) this_count;
-               SCpnt->cmnd[5] = 0;
+           block = block >> 3;
+           this_count = this_count >> 3;
        }
-
-       /*
-        * We shouldn't disconnect in the middle of a sector, so with a dumb
-        * host adapter, it's safe to assume that we can at least transfer
-        * this many bytes between each connect / disconnect.
-        */
-       SCpnt->transfersize = dpnt->device->sector_size;
-       SCpnt->underflow = this_count << 9;
-
-       SCpnt->allowed = MAX_RETRIES;
-       SCpnt->timeout_per_command = (SCpnt->device->type == TYPE_DISK ?
-                                     SD_TIMEOUT : SD_MOD_TIMEOUT);
-
-       /*
-        * This is the completion routine we use.  This is matched in terms
-        * of capability to this function.
-        */
-       SCpnt->done = rw_intr;
-
-       /*
-        * This indicates that the command is ready from our end to be
-        * queued.
-        */
-       return 1;
+    }
+    switch (SCpnt->request.cmd) {
+    case WRITE:
+       if (!dpnt->device->writeable) {
+           return 0;
+       }
+       SCpnt->cmnd[0] = WRITE_6;
+       SCpnt->sc_data_direction = SCSI_DATA_WRITE;
+       break;
+    case READ:
+       SCpnt->cmnd[0] = READ_6;
+       SCpnt->sc_data_direction = SCSI_DATA_READ;
+       break;
+    default:
+       panic("Unknown sd command %d\n", SCpnt->request.cmd);
+    }
+
+    SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", nbuff,
+                              (SCpnt->request.cmd == WRITE) ? "writing" : 
+                              "reading", this_count, 
+                              SCpnt->request.nr_sectors));
+
+    SCpnt->cmnd[1] = (SCpnt->device->scsi_level <= SCSI_2) ?
+       ((SCpnt->lun << 5) & 0xe0) : 0;
+
+    if (((this_count > 0xff) || (block > 0x1fffff)) || SCpnt->device->ten) {
+       if (this_count > 0xffff)
+           this_count = 0xffff;
+
+       SCpnt->cmnd[0] += READ_10 - READ_6;
+       SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
+       SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
+       SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
+       SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+       SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
+       SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
+       SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+    } else {
+       if (this_count > 0xff)
+           this_count = 0xff;
+
+       SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+       SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
+       SCpnt->cmnd[3] = (unsigned char) block & 0xff;
+       SCpnt->cmnd[4] = (unsigned char) this_count;
+       SCpnt->cmnd[5] = 0;
+    }
+
+    /*
+     * We shouldn't disconnect in the middle of a sector, so with a dumb
+     * host adapter, it's safe to assume that we can at least transfer
+     * this many bytes between each connect / disconnect.
+     */
+    SCpnt->transfersize = dpnt->device->sector_size;
+    SCpnt->underflow = this_count << 9;
+
+    SCpnt->allowed = MAX_RETRIES;
+    SCpnt->timeout_per_command = (SCpnt->device->type == TYPE_DISK ?
+                                 SD_TIMEOUT : SD_MOD_TIMEOUT);
+
+    /*
+     * This is the completion routine we use.  This is matched in terms
+     * of capability to this function.
+     */
+    SCpnt->done = rw_intr;
+
+    /*
+     * This indicates that the command is ready from our end to be
+     * queued.
+     */
+    return 1;
 }
 
 static int sd_open(struct inode *inode, struct file *filp)
 {
-       int target, retval = -ENXIO;
-       Scsi_Device * SDev;
-       target = DEVICE_NR(inode->i_rdev);
-
-       SCSI_LOG_HLQUEUE(1, printk("target=%d, max=%d\n", target, sd_template.dev_max));
+    int target, retval = -ENXIO;
+    Scsi_Device * SDev;
+    target = DEVICE_NR(inode->i_rdev);
+
+    SCSI_LOG_HLQUEUE(1, printk("target=%d, max=%d\n", target, sd_template.dev_max));
+
+    if (target >= sd_template.dev_max || !rscsi_disks[target].device)
+       return -ENXIO;  /* No such device */
+
+    /*
+     * If the device is in error recovery, wait until it is done.
+     * If the device is offline, then disallow any access to it.
+     */
+    if (!scsi_block_when_processing_errors(rscsi_disks[target].device)) {
+       return -ENXIO;
+    }
+    /*
+     * Make sure that only one process can do a check_change_disk at one time.
+     * This is also used to lock out further access when the partition table
+     * is being re-read.
+     */
+
+    while (rscsi_disks[target].device->busy) {
+       barrier();
+       cpu_relax();
+    }
+    /*
+     * The following code can sleep.
+     * Module unloading must be prevented
+     */
+    SDev = rscsi_disks[target].device;
+    if (SDev->host->hostt->module)
+       __MOD_INC_USE_COUNT(SDev->host->hostt->module);
+    if (sd_template.module)
+       __MOD_INC_USE_COUNT(sd_template.module);
+    SDev->access_count++;
+
+#if 0 
+    if (rscsi_disks[target].device->removable) {
+       SDev->allow_revalidate = 1;
+       check_disk_change(inode->i_rdev);
+       SDev->allow_revalidate = 0;
 
-       if (target >= sd_template.dev_max || !rscsi_disks[target].device)
-               return -ENXIO;  /* No such device */
 
        /*
-        * If the device is in error recovery, wait until it is done.
-        * If the device is offline, then disallow any access to it.
+        * If the drive is empty, just let the open fail.
         */
-       if (!scsi_block_when_processing_errors(rscsi_disks[target].device)) {
-               return -ENXIO;
+       if ((!rscsi_disks[target].ready) && !(filp->f_flags & O_NDELAY)) {
+           retval = -ENOMEDIUM;
+           goto error_out;
        }
-       /*
-        * Make sure that only one process can do a check_change_disk at one time.
-        * This is also used to lock out further access when the partition table
-        * is being re-read.
-        */
-
-       while (rscsi_disks[target].device->busy) {
-               barrier();
-               cpu_relax();
-       }
-       /*
-        * The following code can sleep.
-        * Module unloading must be prevented
-        */
-       SDev = rscsi_disks[target].device;
-       if (SDev->host->hostt->module)
-               __MOD_INC_USE_COUNT(SDev->host->hostt->module);
-       if (sd_template.module)
-               __MOD_INC_USE_COUNT(sd_template.module);
-       SDev->access_count++;
-
-       if (rscsi_disks[target].device->removable) {
-               SDev->allow_revalidate = 1;
-               check_disk_change(inode->i_rdev);
-               SDev->allow_revalidate = 0;
-
-               /*
-                * If the drive is empty, just let the open fail.
-                */
-               if ((!rscsi_disks[target].ready) && !(filp->f_flags & O_NDELAY)) {
-                       retval = -ENOMEDIUM;
-                       goto error_out;
-               }
 
-               /*
-                * Similarly, if the device has the write protect tab set,
-                * have the open fail if the user expects to be able to write
-                * to the thing.
-                */
-               if ((rscsi_disks[target].write_prot) && (filp->f_mode & 2)) {
-                       retval = -EROFS;
-                       goto error_out;
-               }
-       }
        /*
-        * It is possible that the disk changing stuff resulted in the device
-        * being taken offline.  If this is the case, report this to the user,
-        * and don't pretend that
-        * the open actually succeeded.
+        * Similarly, if the device has the write protect tab set,
+        * have the open fail if the user expects to be able to write
+        * to the thing.
         */
-       if (!SDev->online) {
-               goto error_out;
-       }
-       /*
-        * See if we are requesting a non-existent partition.  Do this
-        * after checking for disk change.
-        */
-       if (sd_sizes[SD_PARTITION(inode->i_rdev)] == 0) {
-               goto error_out;
+       if ((rscsi_disks[target].write_prot) && (filp->f_mode & 2)) {
+           retval = -EROFS;
+           goto error_out;
        }
+    }
+#endif
 
-       if (SDev->removable)
-               if (SDev->access_count==1)
-                       if (scsi_block_when_processing_errors(SDev))
-                               scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, NULL);
+    /*
+     * It is possible that the disk changing stuff resulted in the device
+     * being taken offline.  If this is the case, report this to the user,
+     * and don't pretend that
+     * the open actually succeeded.
+     */
+    if (!SDev->online) {
+       goto error_out;
+    }
+    /*
+     * See if we are requesting a non-existent partition.  Do this
+     * after checking for disk change.
+     */
+    if (sd_sizes[SD_PARTITION(inode->i_rdev)] == 0) {
+       goto error_out;
+    }
+
+    if (SDev->removable)
+       if (SDev->access_count==1)
+           if (scsi_block_when_processing_errors(SDev))
+               scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, NULL);
 
        
-       return 0;
+    return 0;
 
-error_out:
-       SDev->access_count--;
-       if (SDev->host->hostt->module)
-               __MOD_DEC_USE_COUNT(SDev->host->hostt->module);
-       if (sd_template.module)
-               __MOD_DEC_USE_COUNT(sd_template.module);
-       return retval;  
+ error_out:
+    SDev->access_count--;
+    if (SDev->host->hostt->module)
+       __MOD_DEC_USE_COUNT(SDev->host->hostt->module);
+    if (sd_template.module)
+       __MOD_DEC_USE_COUNT(sd_template.module);
+    return retval;     
 }
 
 static int sd_release(struct inode *inode, struct file *file)
 {
-       int target;
-       Scsi_Device * SDev;
-
-       target = DEVICE_NR(inode->i_rdev);
-       SDev = rscsi_disks[target].device;
-       if (!SDev)
-               return -ENODEV;
-
-       SDev->access_count--;
-
-       if (SDev->removable) {
-               if (!SDev->access_count)
-                       if (scsi_block_when_processing_errors(SDev))
-                               scsi_ioctl(SDev, SCSI_IOCTL_DOORUNLOCK, NULL);
-       }
-       if (SDev->host->hostt->module)
-               __MOD_DEC_USE_COUNT(SDev->host->hostt->module);
-       if (sd_template.module)
-               __MOD_DEC_USE_COUNT(sd_template.module);
-       return 0;
+    int target;
+    Scsi_Device * SDev;
+    
+    target = DEVICE_NR(inode->i_rdev);
+    SDev = rscsi_disks[target].device;
+    if (!SDev)
+       return -ENODEV;
+    
+    SDev->access_count--;
+    
+    if (SDev->removable) {
+       if (!SDev->access_count)
+           if (scsi_block_when_processing_errors(SDev))
+               scsi_ioctl(SDev, SCSI_IOCTL_DOORUNLOCK, NULL);
+    }
+    if (SDev->host->hostt->module)
+       __MOD_DEC_USE_COUNT(SDev->host->hostt->module);
+    if (sd_template.module)
+       __MOD_DEC_USE_COUNT(sd_template.module);
+    return 0;
 }
 
 static struct block_device_operations sd_fops =
 {
-       owner:                  THIS_MODULE,
+/*     owner:                  THIS_MODULE, */
        open:                   sd_open,
        release:                sd_release,
        ioctl:                  sd_ioctl,
@@ -596,101 +610,102 @@ static struct gendisk sd_gendisk =
 
 static void rw_intr(Scsi_Cmnd * SCpnt)
 {
-       int result = SCpnt->result;
+    int result = SCpnt->result;
 #if CONFIG_SCSI_LOGGING
-       char nbuff[6];
+    char nbuff[6];
 #endif
-       int this_count = SCpnt->bufflen >> 9;
-       int good_sectors = (result == 0 ? this_count : 0);
-       int block_sectors = 1;
-       long error_sector;
-
-       SCSI_LOG_HLCOMPLETE(1, sd_devname(DEVICE_NR(SCpnt->request.rq_dev), nbuff));
-
-       SCSI_LOG_HLCOMPLETE(1, printk("%s : rw_intr(%d, %x [%x %x])\n", nbuff,
-                                     SCpnt->host->host_no,
-                                     result,
-                                     SCpnt->sense_buffer[0],
-                                     SCpnt->sense_buffer[2]));
-
-       /*
-          Handle MEDIUM ERRORs that indicate partial success.  Since this is a
-          relatively rare error condition, no care is taken to avoid
-          unnecessary additional work such as memcpy's that could be avoided.
-        */
-
-       /* An error occurred */
-       if (driver_byte(result) != 0 &&         /* An error occured */
-           SCpnt->sense_buffer[0] == 0xF0) {   /* Sense data is valid */
-               switch (SCpnt->sense_buffer[2]) {
-               case MEDIUM_ERROR:
-                       error_sector = (SCpnt->sense_buffer[3] << 24) |
-                       (SCpnt->sense_buffer[4] << 16) |
-                       (SCpnt->sense_buffer[5] << 8) |
-                       SCpnt->sense_buffer[6];
-                       if (SCpnt->request.bh != NULL)
-                               block_sectors = SCpnt->request.bh->b_size >> 9;
-                       switch (SCpnt->device->sector_size) {
-                       case 1024:
-                               error_sector <<= 1;
-                               if (block_sectors < 2)
-                                       block_sectors = 2;
-                               break;
-                       case 2048:
-                               error_sector <<= 2;
-                               if (block_sectors < 4)
-                                       block_sectors = 4;
-                               break;
-                       case 4096:
-                               error_sector <<=3;
-                               if (block_sectors < 8)
-                                       block_sectors = 8;
-                               break;
-                       case 256:
-                               error_sector >>= 1;
-                               break;
-                       default:
-                               break;
-                       }
-                       error_sector -= sd_gendisks[SD_MAJOR_IDX(
-                               SCpnt->request.rq_dev)].part[MINOR(
-                               SCpnt->request.rq_dev)].start_sect;
-                       error_sector &= ~(block_sectors - 1);
-                       good_sectors = error_sector - SCpnt->request.sector;
-                       if (good_sectors < 0 || good_sectors >= this_count)
-                               good_sectors = 0;
-                       break;
-
-               case RECOVERED_ERROR:
-                       /*
-                        * An error occured, but it recovered.  Inform the
-                        * user, but make sure that it's not treated as a
-                        * hard error.
-                        */
-                       print_sense("sd", SCpnt);
-                       result = 0;
-                       SCpnt->sense_buffer[0] = 0x0;
-                       good_sectors = this_count;
-                       break;
-
-               case ILLEGAL_REQUEST:
-                       if (SCpnt->device->ten == 1) {
-                               if (SCpnt->cmnd[0] == READ_10 ||
-                                   SCpnt->cmnd[0] == WRITE_10)
-                                       SCpnt->device->ten = 0;
-                       }
-                       break;
-
-               default:
-                       break;
-               }
+    int this_count = SCpnt->bufflen >> 9;
+    int good_sectors = (result == 0 ? this_count : 0);
+    int block_sectors = 1;
+    long error_sector;
+    
+    SCSI_LOG_HLCOMPLETE(1, sd_devname(DEVICE_NR(SCpnt->request.rq_dev), 
+                                     nbuff));
+    
+    SCSI_LOG_HLCOMPLETE(1, printk("%s : rw_intr(%d, %x [%x %x])\n", nbuff,
+                                 SCpnt->host->host_no,
+                                 result,
+                                 SCpnt->sense_buffer[0],
+                                 SCpnt->sense_buffer[2]));
+    
+    /*
+      Handle MEDIUM ERRORs that indicate partial success.  Since this is a
+      relatively rare error condition, no care is taken to avoid
+      unnecessary additional work such as memcpy's that could be avoided.
+    */
+    
+    /* An error occurred */
+    if (driver_byte(result) != 0 &&    /* An error occurred */
+       SCpnt->sense_buffer[0] == 0xF0) {       /* Sense data is valid */
+       switch (SCpnt->sense_buffer[2]) {
+       case MEDIUM_ERROR:
+           error_sector = (SCpnt->sense_buffer[3] << 24) |
+               (SCpnt->sense_buffer[4] << 16) |
+               (SCpnt->sense_buffer[5] << 8) |
+               SCpnt->sense_buffer[6];
+           if (SCpnt->request.bh != NULL)
+               block_sectors = SCpnt->request.bh->b_size >> 9;
+           switch (SCpnt->device->sector_size) {
+           case 1024:
+               error_sector <<= 1;
+               if (block_sectors < 2)
+                   block_sectors = 2;
+               break;
+           case 2048:
+               error_sector <<= 2;
+               if (block_sectors < 4)
+                   block_sectors = 4;
+               break;
+           case 4096:
+               error_sector <<=3;
+               if (block_sectors < 8)
+                   block_sectors = 8;
+               break;
+           case 256:
+               error_sector >>= 1;
+               break;
+           default:
+               break;
+           }
+           error_sector -= sd_gendisks[SD_MAJOR_IDX(
+               SCpnt->request.rq_dev)].part[MINOR(
+                   SCpnt->request.rq_dev)].start_sect;
+           error_sector &= ~(block_sectors - 1);
+           good_sectors = error_sector - SCpnt->request.sector;
+           if (good_sectors < 0 || good_sectors >= this_count)
+               good_sectors = 0;
+           break;
+           
+       case RECOVERED_ERROR:
+           /*
+            * An error occurred, but it recovered.  Inform the
+            * user, but make sure that it's not treated as a
+            * hard error.
+            */
+           print_sense("sd", SCpnt);
+           result = 0;
+           SCpnt->sense_buffer[0] = 0x0;
+           good_sectors = this_count;
+           break;
+           
+       case ILLEGAL_REQUEST:
+           if (SCpnt->device->ten == 1) {
+               if (SCpnt->cmnd[0] == READ_10 ||
+                   SCpnt->cmnd[0] == WRITE_10)
+                   SCpnt->device->ten = 0;
+           }
+           break;
+           
+       default:
+           break;
        }
-       /*
-        * This calls the generic completion function, now that we know
-        * how many actual sectors finished, and how many sectors we need
-        * to say have failed.
-        */
-       scsi_io_completion(SCpnt, good_sectors, block_sectors);
+    }
+    /*
+     * This calls the generic completion function, now that we know
+     * how many actual sectors finished, and how many sectors we need
+     * to say have failed.
+     */
+    scsi_io_completion(SCpnt, good_sectors, block_sectors);
 }
 /*
  * requeue_sd_request() is the request handler function for the sd driver.
@@ -701,386 +716,387 @@ static void rw_intr(Scsi_Cmnd * SCpnt)
 
 static int check_scsidisk_media_change(kdev_t full_dev)
 {
-       int retval;
-       int target;
-       int flag = 0;
-       Scsi_Device * SDev;
-
-       target = DEVICE_NR(full_dev);
-       SDev = rscsi_disks[target].device;
+    int retval;
+    int target;
+    int flag = 0;
+    Scsi_Device * SDev;
 
-       if (target >= sd_template.dev_max || !SDev) {
-               printk("SCSI disk request error: invalid device.\n");
-               return 0;
-       }
-       if (!SDev->removable)
-               return 0;
+    target = DEVICE_NR(full_dev);
+    SDev = rscsi_disks[target].device;
 
-       /*
-        * If the device is offline, don't send any commands - just pretend as
-        * if the command failed.  If the device ever comes back online, we
-        * can deal with it then.  It is only because of unrecoverable errors
-        * that we would ever take a device offline in the first place.
-        */
-       if (SDev->online == FALSE) {
-               rscsi_disks[target].ready = 0;
-               SDev->changed = 1;
-               return 1;       /* This will force a flush, if called from
-                                * check_disk_change */
-       }
-
-       /* Using Start/Stop enables differentiation between drive with
-        * no cartridge loaded - NOT READY, drive with changed cartridge -
-        * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
-        * This also handles drives that auto spin down. eg iomega jaz 1GB
-        * as this will spin up the drive.
-        */
-       retval = -ENODEV;
-       if (scsi_block_when_processing_errors(SDev))
-               retval = scsi_ioctl(SDev, SCSI_IOCTL_START_UNIT, NULL);
+    if (target >= sd_template.dev_max || !SDev) {
+       printk("SCSI disk request error: invalid device.\n");
+       return 0;
+    }
+    if (!SDev->removable)
+       return 0;
 
-       if (retval) {           /* Unable to test, unit probably not ready.
+    /*
+     * If the device is offline, don't send any commands - just pretend as
+     * if the command failed.  If the device ever comes back online, we
+     * can deal with it then.  It is only because of unrecoverable errors
+     * that we would ever take a device offline in the first place.
+     */
+    if (SDev->online == FALSE) {
+       rscsi_disks[target].ready = 0;
+       SDev->changed = 1;
+       return 1;       /* This will force a flush, if called from
+                        * check_disk_change */
+    }
+
+    /* Using Start/Stop enables differentiation between drive with
+     * no cartridge loaded - NOT READY, drive with changed cartridge -
+     * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
+     * This also handles drives that auto spin down. eg iomega jaz 1GB
+     * as this will spin up the drive.
+     */
+    retval = -ENODEV;
+    if (scsi_block_when_processing_errors(SDev))
+       retval = scsi_ioctl(SDev, SCSI_IOCTL_START_UNIT, NULL);
+
+    if (retval) {              /* Unable to test, unit probably not ready.
                                 * This usually means there is no disc in the
                                 * drive.  Mark as changed, and we will figure
                                 * it out later once the drive is available
                                 * again.  */
 
-               rscsi_disks[target].ready = 0;
-               SDev->changed = 1;
-               return 1;       /* This will force a flush, if called from
-                                * check_disk_change */
-       }
-       /*
-        * for removable scsi disk ( FLOPTICAL ) we have to recognise the
-        * presence of disk in the drive. This is kept in the Scsi_Disk
-        * struct and tested at open !  Daniel Roche ( dan@lectra.fr )
-        */
-
-       rscsi_disks[target].ready = 1;  /* FLOPTICAL */
-
-       retval = SDev->changed;
-       if (!flag)
-               SDev->changed = 0;
-       return retval;
+       rscsi_disks[target].ready = 0;
+       SDev->changed = 1;
+       return 1;       /* This will force a flush, if called from
+                        * check_disk_change */
+    }
+    /*
+     * for removable scsi disk ( FLOPTICAL ) we have to recognise the
+     * presence of disk in the drive. This is kept in the Scsi_Disk
+     * struct and tested at open !  Daniel Roche ( dan@lectra.fr )
+     */
+
+    rscsi_disks[target].ready = 1;     /* FLOPTICAL */
+
+    retval = SDev->changed;
+    if (!flag)
+       SDev->changed = 0;
+    return retval;
 }
 
 static int sd_init_onedisk(int i)
 {
-       unsigned char cmd[10];
-       char nbuff[6];
-       unsigned char *buffer;
-       unsigned long spintime_value = 0;
-       int the_result, retries, spintime;
-       int sector_size;
-       Scsi_Request *SRpnt;
-
-       /*
-        * Get the name of the disk, in case we need to log it somewhere.
-        */
-       sd_devname(i, nbuff);
+    unsigned char cmd[10];
+    char nbuff[6];
+    unsigned char *buffer;
+    unsigned long spintime_value = 0;
+    int the_result, retries, spintime;
+    int sector_size;
+    Scsi_Request *SRpnt;
+
+    /*
+     * Get the name of the disk, in case we need to log it somewhere.
+     */
+    sd_devname(i, nbuff);
+
+    /*
+     * If the device is offline, don't try and read capacity or any
+     * of the other niceties.
+     */
+    if (rscsi_disks[i].device->online == FALSE)
+       return i;
 
-       /*
-        * If the device is offline, don't try and read capacity or any
-        * of the other niceties.
-        */
-       if (rscsi_disks[i].device->online == FALSE)
-               return i;
+    /*
+     * We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
+     * considered a fatal error, and many devices report such an error
+     * just after a scsi bus reset.
+     */
 
-       /*
-        * We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
-        * considered a fatal error, and many devices report such an error
-        * just after a scsi bus reset.
-        */
+    SRpnt = scsi_allocate_request(rscsi_disks[i].device);
+    if (!SRpnt) {
+       printk(KERN_WARNING 
+              "(sd_init_onedisk:) Request allocation failure.\n");
+       return i;
+    }
 
-       SRpnt = scsi_allocate_request(rscsi_disks[i].device);
-       if (!SRpnt) {
-               printk(KERN_WARNING "(sd_init_onedisk:) Request allocation failure.\n");
-               return i;
+    buffer = (unsigned char *) scsi_malloc(512);
+    if (!buffer) {
+       printk(KERN_WARNING "(sd_init_onedisk:) Memory allocation failure.\n");
+       scsi_release_request(SRpnt);
+       return i;
+    }
+
+    spintime = 0;
+
+    /* Spin up drives, as required.  Only do this at boot time */
+    /* Spinup needs to be done for module loads too. */
+    do {
+       retries = 0;
+
+       while (retries < 3) {
+           cmd[0] = TEST_UNIT_READY;
+           cmd[1] = (rscsi_disks[i].device->scsi_level <= SCSI_2) ?
+               ((rscsi_disks[i].device->lun << 5) & 0xe0) : 0;
+           memset((void *) &cmd[2], 0, 8);
+           SRpnt->sr_cmd_len = 0;
+           SRpnt->sr_sense_buffer[0] = 0;
+           SRpnt->sr_sense_buffer[2] = 0;
+           SRpnt->sr_data_direction = SCSI_DATA_NONE;
+
+           scsi_wait_req (SRpnt, (void *) cmd, (void *) buffer,
+                          0/*512*/, SD_TIMEOUT, MAX_RETRIES);
+
+           the_result = SRpnt->sr_result;
+           retries++;
+           if (the_result == 0
+               || SRpnt->sr_sense_buffer[2] != UNIT_ATTENTION)
+               break;
        }
 
-       buffer = (unsigned char *) scsi_malloc(512);
-       if (!buffer) {
-               printk(KERN_WARNING "(sd_init_onedisk:) Memory allocation failure.\n");
-               scsi_release_request(SRpnt);
-               return i;
+       /*
+        * If the drive has indicated to us that it doesn't have
+        * any media in it, don't bother with any of the rest of
+        * this crap.
+        */
+       if( the_result != 0
+           && ((driver_byte(the_result) & DRIVER_SENSE) != 0)
+           && SRpnt->sr_sense_buffer[2] == UNIT_ATTENTION
+           && SRpnt->sr_sense_buffer[12] == 0x3A ) {
+           rscsi_disks[i].capacity = 0x1fffff;
+           sector_size = 512;
+           rscsi_disks[i].device->changed = 1;
+           rscsi_disks[i].ready = 0;
+           break;
        }
 
-       spintime = 0;
-
-       /* Spin up drives, as required.  Only do this at boot time */
-       /* Spinup needs to be done for module loads too. */
-       do {
-               retries = 0;
-
-               while (retries < 3) {
-                       cmd[0] = TEST_UNIT_READY;
-                       cmd[1] = (rscsi_disks[i].device->scsi_level <= SCSI_2) ?
-                                ((rscsi_disks[i].device->lun << 5) & 0xe0) : 0;
-                       memset((void *) &cmd[2], 0, 8);
-                       SRpnt->sr_cmd_len = 0;
-                       SRpnt->sr_sense_buffer[0] = 0;
-                       SRpnt->sr_sense_buffer[2] = 0;
-                       SRpnt->sr_data_direction = SCSI_DATA_NONE;
-
-                       scsi_wait_req (SRpnt, (void *) cmd, (void *) buffer,
-                               0/*512*/, SD_TIMEOUT, MAX_RETRIES);
-
-                       the_result = SRpnt->sr_result;
-                       retries++;
-                       if (the_result == 0
-                           || SRpnt->sr_sense_buffer[2] != UNIT_ATTENTION)
-                               break;
-               }
-
-               /*
-                * If the drive has indicated to us that it doesn't have
-                * any media in it, don't bother with any of the rest of
-                * this crap.
-                */
-               if( the_result != 0
-                   && ((driver_byte(the_result) & DRIVER_SENSE) != 0)
-                   && SRpnt->sr_sense_buffer[2] == UNIT_ATTENTION
-                   && SRpnt->sr_sense_buffer[12] == 0x3A ) {
-                       rscsi_disks[i].capacity = 0x1fffff;
-                       sector_size = 512;
-                       rscsi_disks[i].device->changed = 1;
-                       rscsi_disks[i].ready = 0;
-                       break;
-               }
-
-               /* Look for non-removable devices that return NOT_READY.
-                * Issue command to spin up drive for these cases. */
-               if (the_result && !rscsi_disks[i].device->removable &&
-                   SRpnt->sr_sense_buffer[2] == NOT_READY) {
-                       unsigned long time1;
-                       if (!spintime) {
-                               printk("%s: Spinning up disk...", nbuff);
-                               cmd[0] = START_STOP;
-                               cmd[1] = (rscsi_disks[i].device->scsi_level <= SCSI_2) ?
-                                        ((rscsi_disks[i].device->lun << 5) & 0xe0) : 0;
-                               cmd[1] |= 1;    /* Return immediately */
-                               memset((void *) &cmd[2], 0, 8);
-                               cmd[4] = 1;     /* Start spin cycle */
-                               SRpnt->sr_cmd_len = 0;
-                               SRpnt->sr_sense_buffer[0] = 0;
-                               SRpnt->sr_sense_buffer[2] = 0;
-
-                               SRpnt->sr_data_direction = SCSI_DATA_READ;
-                               scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
-                                           0/*512*/, SD_TIMEOUT, MAX_RETRIES);
-                               spintime_value = jiffies;
-                       }
-                       spintime = 1;
-                       time1 = HZ;
-                       /* Wait 1 second for next try */
-                       do {
-                               current->state = TASK_UNINTERRUPTIBLE;
-                               time1 = schedule_timeout(time1);
-                       } while(time1);
-                       printk(".");
-               }
-       } while (the_result && spintime &&
-                time_after(spintime_value + 100 * HZ, jiffies));
-       if (spintime) {
-               if (the_result)
-                       printk("not responding...\n");
-               else
-                       printk("ready\n");
-       }
-       retries = 3;
-       do {
-               cmd[0] = READ_CAPACITY;
+       /* Look for non-removable devices that return NOT_READY.
+        * Issue command to spin up drive for these cases. */
+       if (the_result && !rscsi_disks[i].device->removable &&
+           SRpnt->sr_sense_buffer[2] == NOT_READY) {
+           unsigned long time1;
+           if (!spintime) {
+               printk("%s: Spinning up disk...", nbuff);
+               cmd[0] = START_STOP;
                cmd[1] = (rscsi_disks[i].device->scsi_level <= SCSI_2) ?
-                        ((rscsi_disks[i].device->lun << 5) & 0xe0) : 0;
+                   ((rscsi_disks[i].device->lun << 5) & 0xe0) : 0;
+               cmd[1] |= 1;    /* Return immediately */
                memset((void *) &cmd[2], 0, 8);
-               memset((void *) buffer, 0, 8);
+               cmd[4] = 1;     /* Start spin cycle */
                SRpnt->sr_cmd_len = 0;
                SRpnt->sr_sense_buffer[0] = 0;
                SRpnt->sr_sense_buffer[2] = 0;
 
                SRpnt->sr_data_direction = SCSI_DATA_READ;
                scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
-                           8, SD_TIMEOUT, MAX_RETRIES);
-
-               the_result = SRpnt->sr_result;
-               retries--;
-
-       } while (the_result && retries);
-
+                             0/*512*/, SD_TIMEOUT, MAX_RETRIES);
+               spintime_value = jiffies;
+           }
+           spintime = 1;
+           time1 = HZ;
+           /* Wait 1 second for next try */
+           do {
+               current->state = TASK_UNINTERRUPTIBLE;
+               time1 = schedule_timeout(time1);
+           } while(time1);
+           printk(".");
+       }
+    } while (the_result && spintime &&
+            time_after(spintime_value + 100 * HZ, jiffies));
+    if (spintime) {
+       if (the_result)
+           printk("not responding...\n");
+       else
+           printk("ready\n");
+    }
+    retries = 3;
+    do {
+       cmd[0] = READ_CAPACITY;
+       cmd[1] = (rscsi_disks[i].device->scsi_level <= SCSI_2) ?
+           ((rscsi_disks[i].device->lun << 5) & 0xe0) : 0;
+       memset((void *) &cmd[2], 0, 8);
+       memset((void *) buffer, 0, 8);
+       SRpnt->sr_cmd_len = 0;
+       SRpnt->sr_sense_buffer[0] = 0;
+       SRpnt->sr_sense_buffer[2] = 0;
+
+       SRpnt->sr_data_direction = SCSI_DATA_READ;
+       scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
+                     8, SD_TIMEOUT, MAX_RETRIES);
+
+       the_result = SRpnt->sr_result;
+       retries--;
+
+    } while (the_result && retries);
+
+    /*
+     * The SCSI standard says:
+     * "READ CAPACITY is necessary for self configuring software"
+     *  While not mandatory, support of READ CAPACITY is strongly
+     *  encouraged.
+     *  We used to die if we couldn't successfully do a READ CAPACITY.
+     *  But, now we go on about our way.  The side effects of this are
+     *
+     *  1. We can't know block size with certainty. I have said
+     *     "512 bytes is it" as this is most common.
+     *
+     *  2. Recovery from when someone attempts to read past the
+     *     end of the raw device will be slower.
+     */
+
+    if (the_result) {
+       printk("%s : READ CAPACITY failed.\n"
+              "%s : status = %x, message = %02x, host = %d, driver = %02x \n",
+              nbuff, nbuff,
+              status_byte(the_result),
+              msg_byte(the_result),
+              host_byte(the_result),
+              driver_byte(the_result)
+           );
+       if (driver_byte(the_result) & DRIVER_SENSE)
+           print_req_sense("sd", SRpnt);
+       else
+           printk("%s : sense not available. \n", nbuff);
+
+       printk("%s : block size assumed to be 512 bytes, disk size 1GB.  \n",
+              nbuff);
+       rscsi_disks[i].capacity = 0x1fffff;
+       sector_size = 512;
+
+       /* Set dirty bit for removable devices if not ready -
+        * sometimes drives will not report this properly. */
+       if (rscsi_disks[i].device->removable &&
+           SRpnt->sr_sense_buffer[2] == NOT_READY)
+           rscsi_disks[i].device->changed = 1;
+
+    } else {
        /*
-        * The SCSI standard says:
-        * "READ CAPACITY is necessary for self configuring software"
-        *  While not mandatory, support of READ CAPACITY is strongly
-        *  encouraged.
-        *  We used to die if we couldn't successfully do a READ CAPACITY.
-        *  But, now we go on about our way.  The side effects of this are
-        *
-        *  1. We can't know block size with certainty. I have said
-        *     "512 bytes is it" as this is most common.
-        *
-        *  2. Recovery from when someone attempts to read past the
-        *     end of the raw device will be slower.
+        * FLOPTICAL, if read_capa is ok, drive is assumed to be ready
         */
+       rscsi_disks[i].ready = 1;
 
-       if (the_result) {
-               printk("%s : READ CAPACITY failed.\n"
-                      "%s : status = %x, message = %02x, host = %d, driver = %02x \n",
-                      nbuff, nbuff,
-                      status_byte(the_result),
-                      msg_byte(the_result),
-                      host_byte(the_result),
-                      driver_byte(the_result)
-                   );
-               if (driver_byte(the_result) & DRIVER_SENSE)
-                       print_req_sense("sd", SRpnt);
-               else
-                       printk("%s : sense not available. \n", nbuff);
-
-               printk("%s : block size assumed to be 512 bytes, disk size 1GB.  \n",
-                      nbuff);
-               rscsi_disks[i].capacity = 0x1fffff;
-               sector_size = 512;
-
-               /* Set dirty bit for removable devices if not ready -
-                * sometimes drives will not report this properly. */
-               if (rscsi_disks[i].device->removable &&
-                   SRpnt->sr_sense_buffer[2] == NOT_READY)
-                       rscsi_disks[i].device->changed = 1;
+       rscsi_disks[i].capacity = 1 + ((buffer[0] << 24) |
+                                      (buffer[1] << 16) |
+                                      (buffer[2] << 8) |
+                                      buffer[3]);
 
-       } else {
-               /*
-                * FLOPTICAL, if read_capa is ok, drive is assumed to be ready
-                */
-               rscsi_disks[i].ready = 1;
-
-               rscsi_disks[i].capacity = 1 + ((buffer[0] << 24) |
-                                              (buffer[1] << 16) |
-                                              (buffer[2] << 8) |
-                                              buffer[3]);
-
-               sector_size = (buffer[4] << 24) |
-                   (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
-
-               if (sector_size == 0) {
-                       sector_size = 512;
-                       printk("%s : sector size 0 reported, assuming 512.\n",
-                              nbuff);
-               }
-               if (sector_size != 512 &&
-                   sector_size != 1024 &&
-                   sector_size != 2048 &&
-                   sector_size != 4096 &&
-                   sector_size != 256) {
-                       printk("%s : unsupported sector size %d.\n",
-                              nbuff, sector_size);
-                       /*
-                        * The user might want to re-format the drive with
-                        * a supported sectorsize.  Once this happens, it
-                        * would be relatively trivial to set the thing up.
-                        * For this reason, we leave the thing in the table.
-                        */
-                       rscsi_disks[i].capacity = 0;
-               }
-               if (sector_size > 1024) {
-                       int m;
-
-                       /*
-                        * We must fix the sd_blocksizes and sd_hardsizes
-                        * to allow us to read the partition tables.
-                        * The disk reading code does not allow for reading
-                        * of partial sectors.
-                        */
-                       for (m = i << 4; m < ((i + 1) << 4); m++) {
-                               sd_blocksizes[m] = sector_size;
-                       }
-               } {
-                       /*
-                        * The msdos fs needs to know the hardware sector size
-                        * So I have created this table. See ll_rw_blk.c
-                        * Jacques Gelinas (Jacques@solucorp.qc.ca)
-                        */
-                       int m;
-                       int hard_sector = sector_size;
-                       int sz = rscsi_disks[i].capacity * (hard_sector/256);
-
-                       /* There are 16 minors allocated for each major device */
-                       for (m = i << 4; m < ((i + 1) << 4); m++) {
-                               sd_hardsizes[m] = hard_sector;
-                       }
-
-                       printk("SCSI device %s: "
-                              "%d %d-byte hdwr sectors (%d MB)\n",
-                              nbuff, rscsi_disks[i].capacity,
-                              hard_sector, (sz/2 - sz/1250 + 974)/1950);
-               }
+       sector_size = (buffer[4] << 24) |
+           (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
 
-               /* Rescale capacity to 512-byte units */
-               if (sector_size == 4096)
-                       rscsi_disks[i].capacity <<= 3;
-               if (sector_size == 2048)
-                       rscsi_disks[i].capacity <<= 2;
-               if (sector_size == 1024)
-                       rscsi_disks[i].capacity <<= 1;
-               if (sector_size == 256)
-                       rscsi_disks[i].capacity >>= 1;
+       if (sector_size == 0) {
+           sector_size = 512;
+           printk("%s : sector size 0 reported, assuming 512.\n",
+                  nbuff);
+       }
+       if (sector_size != 512 &&
+           sector_size != 1024 &&
+           sector_size != 2048 &&
+           sector_size != 4096 &&
+           sector_size != 256) {
+           printk("%s : unsupported sector size %d.\n",
+                  nbuff, sector_size);
+           /*
+            * The user might want to re-format the drive with
+            * a supported sectorsize.  Once this happens, it
+            * would be relatively trivial to set the thing up.
+            * For this reason, we leave the thing in the table.
+            */
+           rscsi_disks[i].capacity = 0;
+       }
+       if (sector_size > 1024) {
+           int m;
+
+           /*
+            * We must fix the sd_blocksizes and sd_hardsizes
+            * to allow us to read the partition tables.
+            * The disk reading code does not allow for reading
+            * of partial sectors.
+            */
+           for (m = i << 4; m < ((i + 1) << 4); m++) {
+               sd_blocksizes[m] = sector_size;
+           }
+       } {
+           /*
+            * The msdos fs needs to know the hardware sector size
+            * So I have created this table. See ll_rw_blk.c
+            * Jacques Gelinas (Jacques@solucorp.qc.ca)
+            */
+           int m;
+           int hard_sector = sector_size;
+           int sz = rscsi_disks[i].capacity * (hard_sector/256);
+
+           /* There are 16 minors allocated for each major device */
+           for (m = i << 4; m < ((i + 1) << 4); m++) {
+               sd_hardsizes[m] = hard_sector;
+           }
+
+           printk("SCSI device %s: "
+                  "%d %d-byte hdwr sectors (%d MB)\n",
+                  nbuff, rscsi_disks[i].capacity,
+                  hard_sector, (sz/2 - sz/1250 + 974)/1950);
        }
 
+       /* Rescale capacity to 512-byte units */
+       if (sector_size == 4096)
+           rscsi_disks[i].capacity <<= 3;
+       if (sector_size == 2048)
+           rscsi_disks[i].capacity <<= 2;
+       if (sector_size == 1024)
+           rscsi_disks[i].capacity <<= 1;
+       if (sector_size == 256)
+           rscsi_disks[i].capacity >>= 1;
+    }
+
+
+    /*
+     * Unless otherwise specified, this is not write protected.
+     */
+    rscsi_disks[i].write_prot = 0;
+    if (rscsi_disks[i].device->removable && rscsi_disks[i].ready) {
+       /* FLOPTICAL */
 
        /*
-        * Unless otherwise specified, this is not write protected.
+        * For removable scsi disk ( FLOPTICAL ) we have to recognise
+        * the Write Protect Flag. This flag is kept in the Scsi_Disk
+        * struct and tested at open !
+        * Daniel Roche ( dan@lectra.fr )
+        *
+        * Changed to get all pages (0x3f) rather than page 1 to
+        * get around devices which do not have a page 1.  Since
+        * we're only interested in the header anyway, this should
+        * be fine.
+        *   -- Matthew Dharm (mdharm-scsi@one-eyed-alien.net)
         */
-       rscsi_disks[i].write_prot = 0;
-       if (rscsi_disks[i].device->removable && rscsi_disks[i].ready) {
-               /* FLOPTICAL */
-
-               /*
-                * For removable scsi disk ( FLOPTICAL ) we have to recognise
-                * the Write Protect Flag. This flag is kept in the Scsi_Disk
-                * struct and tested at open !
-                * Daniel Roche ( dan@lectra.fr )
-                *
-                * Changed to get all pages (0x3f) rather than page 1 to
-                * get around devices which do not have a page 1.  Since
-                * we're only interested in the header anyway, this should
-                * be fine.
-                *   -- Matthew Dharm (mdharm-scsi@one-eyed-alien.net)
-                */
-
-               memset((void *) &cmd[0], 0, 8);
-               cmd[0] = MODE_SENSE;
-               cmd[1] = (rscsi_disks[i].device->scsi_level <= SCSI_2) ?
-                        ((rscsi_disks[i].device->lun << 5) & 0xe0) : 0;
-               cmd[2] = 0x3f;  /* Get all pages */
-               cmd[4] = 255;   /* Ask for 255 bytes, even tho we want just the first 8 */
-               SRpnt->sr_cmd_len = 0;
-               SRpnt->sr_sense_buffer[0] = 0;
-               SRpnt->sr_sense_buffer[2] = 0;
 
-               /* same code as READCAPA !! */
-               SRpnt->sr_data_direction = SCSI_DATA_READ;
-               scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
-                           512, SD_TIMEOUT, MAX_RETRIES);
+       memset((void *) &cmd[0], 0, 8);
+       cmd[0] = MODE_SENSE;
+       cmd[1] = (rscsi_disks[i].device->scsi_level <= SCSI_2) ?
+           ((rscsi_disks[i].device->lun << 5) & 0xe0) : 0;
+       cmd[2] = 0x3f;  /* Get all pages */
+       cmd[4] = 255;   /* Ask for 255 bytes, even tho we want just the first 8 */
+       SRpnt->sr_cmd_len = 0;
+       SRpnt->sr_sense_buffer[0] = 0;
+       SRpnt->sr_sense_buffer[2] = 0;
 
-               the_result = SRpnt->sr_result;
+       /* same code as READCAPA !! */
+       SRpnt->sr_data_direction = SCSI_DATA_READ;
+       scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
+                     512, SD_TIMEOUT, MAX_RETRIES);
 
-               if (the_result) {
-                       printk("%s: test WP failed, assume Write Enabled\n", nbuff);
-               } else {
-                       rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
-                       printk("%s: Write Protect is %s\n", nbuff,
-                              rscsi_disks[i].write_prot ? "on" : "off");
-               }
+       the_result = SRpnt->sr_result;
 
-       }                       /* check for write protect */
-       SRpnt->sr_device->ten = 1;
-       SRpnt->sr_device->remap = 1;
-       SRpnt->sr_device->sector_size = sector_size;
-       /* Wake up a process waiting for device */
-       scsi_release_request(SRpnt);
-       SRpnt = NULL;
+       if (the_result) {
+           printk("%s: test WP failed, assume Write Enabled\n", nbuff);
+       } else {
+           rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
+           printk("%s: Write Protect is %s\n", nbuff,
+                  rscsi_disks[i].write_prot ? "on" : "off");
+       }
 
-       scsi_free(buffer, 512);
-       return i;
+    }                  /* check for write protect */
+    SRpnt->sr_device->ten = 1;
+    SRpnt->sr_device->remap = 1;
+    SRpnt->sr_device->sector_size = sector_size;
+    /* Wake up a process waiting for device */
+    scsi_release_request(SRpnt);
+    SRpnt = NULL;
+
+    scsi_free(buffer, 512);
+    return i;
 }
 
 /*
@@ -1092,221 +1108,246 @@ static int sd_registered;
 
 static int sd_init()
 {
-       int i;
-
-       if (sd_template.dev_noticed == 0)
-               return 0;
+    int i;
 
-       if (!rscsi_disks)
-               sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
+    if (sd_template.dev_noticed == 0)
+       return 0;
 
-       if (sd_template.dev_max > N_SD_MAJORS * SCSI_DISKS_PER_MAJOR)
-               sd_template.dev_max = N_SD_MAJORS * SCSI_DISKS_PER_MAJOR;
+    if (!rscsi_disks)
+       sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
 
-       if (!sd_registered) {
-               for (i = 0; i < N_USED_SD_MAJORS; i++) {
-                       if (devfs_register_blkdev(SD_MAJOR(i), "sd", &sd_fops)) {
-                               printk("Unable to get major %d for SCSI disk\n", SD_MAJOR(i));
-                               sd_template.dev_noticed = 0;
-                               return 1;
-                       }
-               }
-               sd_registered++;
-       }
-       /* We do not support attaching loadable devices yet. */
-       if (rscsi_disks)
-               return 0;
-
-       rscsi_disks = kmalloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
-       if (!rscsi_disks)
-               goto cleanup_devfs;
-       memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
-
-       /* for every (necessary) major: */
-       sd_sizes = kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC);
-       if (!sd_sizes)
-               goto cleanup_disks;
-       memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
-
-       sd_blocksizes = kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC);
-       if (!sd_blocksizes)
-               goto cleanup_sizes;
-       
-       sd_hardsizes = kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC);
-       if (!sd_hardsizes)
-               goto cleanup_blocksizes;
-
-       sd_max_sectors = kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC);
-       if (!sd_max_sectors)
-               goto cleanup_max_sectors;
-
-       for (i = 0; i < sd_template.dev_max << 4; i++) {
-               sd_blocksizes[i] = 1024;
-               sd_hardsizes[i] = 512;
-               /*
-                * Allow lowlevel device drivers to generate 512k large scsi
-                * commands if they know what they're doing and they ask for it
-                * explicitly via the SHpnt->max_sectors API.
-                */
-               sd_max_sectors[i] = MAX_SEGMENTS*8;
-       }
+    if (sd_template.dev_max > N_SD_MAJORS * SCSI_DISKS_PER_MAJOR)
+       sd_template.dev_max = N_SD_MAJORS * SCSI_DISKS_PER_MAJOR;
 
+    if (!sd_registered) {
        for (i = 0; i < N_USED_SD_MAJORS; i++) {
-               blksize_size[SD_MAJOR(i)] = sd_blocksizes + i * (SCSI_DISKS_PER_MAJOR << 4);
-               hardsect_size[SD_MAJOR(i)] = sd_hardsizes + i * (SCSI_DISKS_PER_MAJOR << 4);
-               max_sectors[SD_MAJOR(i)] = sd_max_sectors + i * (SCSI_DISKS_PER_MAJOR << 4);
+#ifdef DEVFS_MUST_DIE
+           if (devfs_register_blkdev(SD_MAJOR(i), "sd", &sd_fops)) {
+               printk("Unable to get major %d for SCSI disk\n", SD_MAJOR(i));
+               sd_template.dev_noticed = 0;
+               return 1;
+           }
+#endif
        }
+       sd_registered++;
+    }
+    /* We do not support attaching loadable devices yet. */
+    if (rscsi_disks)
+       return 0;
 
-       sd_gendisks = kmalloc(N_USED_SD_MAJORS * sizeof(struct gendisk), GFP_ATOMIC);
-       if (!sd_gendisks)
-               goto cleanup_sd_gendisks;
-       for (i = 0; i < N_USED_SD_MAJORS; i++) {
-               sd_gendisks[i] = sd_gendisk;    /* memcpy */
-               sd_gendisks[i].de_arr = kmalloc (SCSI_DISKS_PER_MAJOR * sizeof *sd_gendisks[i].de_arr,
-                                                 GFP_ATOMIC);
-               if (!sd_gendisks[i].de_arr)
-                       goto cleanup_gendisks_de_arr;
-                memset (sd_gendisks[i].de_arr, 0,
-                        SCSI_DISKS_PER_MAJOR * sizeof *sd_gendisks[i].de_arr);
-               sd_gendisks[i].flags = kmalloc (SCSI_DISKS_PER_MAJOR * sizeof *sd_gendisks[i].flags,
-                                                GFP_ATOMIC);
-               if (!sd_gendisks[i].flags)
-                       goto cleanup_gendisks_flags;
-                memset (sd_gendisks[i].flags, 0,
-                        SCSI_DISKS_PER_MAJOR * sizeof *sd_gendisks[i].flags);
-               sd_gendisks[i].major = SD_MAJOR(i);
-               sd_gendisks[i].major_name = "sd";
-               sd_gendisks[i].minor_shift = 4;
-               sd_gendisks[i].max_p = 1 << 4;
-               sd_gendisks[i].part = kmalloc((SCSI_DISKS_PER_MAJOR << 4) * sizeof(struct hd_struct),
-                                               GFP_ATOMIC);
-               if (!sd_gendisks[i].part)
-                       goto cleanup_gendisks_part;
-               memset(sd_gendisks[i].part, 0, (SCSI_DISKS_PER_MAJOR << 4) * sizeof(struct hd_struct));
-               sd_gendisks[i].sizes = sd_sizes + (i * SCSI_DISKS_PER_MAJOR << 4);
-               sd_gendisks[i].nr_real = 0;
-               sd_gendisks[i].real_devices =
-                   (void *) (rscsi_disks + i * SCSI_DISKS_PER_MAJOR);
-       }
+    rscsi_disks = kmalloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
+    if (!rscsi_disks)
+       goto cleanup_devfs;
+    memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
+
+    /* for every (necessary) major: */
+    sd_sizes = kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC);
+    if (!sd_sizes)
+       goto cleanup_disks;
+    memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
+
+    sd_blocksizes = kmalloc((sd_template.dev_max << 4) * sizeof(int), 
+                           GFP_ATOMIC);
+    if (!sd_blocksizes)
+       goto cleanup_sizes;
+       
+    sd_hardsizes = kmalloc((sd_template.dev_max << 4) * sizeof(int), 
+                          GFP_ATOMIC);
+    if (!sd_hardsizes)
+       goto cleanup_blocksizes;
+
+    sd_max_sectors = kmalloc((sd_template.dev_max << 4) * sizeof(int), 
+                            GFP_ATOMIC);
+    if (!sd_max_sectors)
+       goto cleanup_max_sectors;
+
+    for (i = 0; i < sd_template.dev_max << 4; i++) {
+       sd_blocksizes[i] = 1024;
+       sd_hardsizes[i] = 512;
+       /*
+        * Allow lowlevel device drivers to generate 512k large scsi
+        * commands if they know what they're doing and they ask for it
+        * explicitly via the SHpnt->max_sectors API.
+        */
+       sd_max_sectors[i] = MAX_SEGMENTS*8;
+    }
+
+    for (i = 0; i < N_USED_SD_MAJORS; i++) {
+       blksize_size[SD_MAJOR(i)] = sd_blocksizes + 
+           i * (SCSI_DISKS_PER_MAJOR << 4);
+       hardsect_size[SD_MAJOR(i)] = sd_hardsizes + 
+           i * (SCSI_DISKS_PER_MAJOR << 4);
+       max_sectors[SD_MAJOR(i)] = sd_max_sectors + 
+           i * (SCSI_DISKS_PER_MAJOR << 4);
+    }
+
+    sd_gendisks = kmalloc(N_USED_SD_MAJORS * sizeof(struct gendisk), 
+                         GFP_ATOMIC);
+    if (!sd_gendisks)
+       goto cleanup_sd_gendisks;
+    for (i = 0; i < N_USED_SD_MAJORS; i++) {
+       sd_gendisks[i] = sd_gendisk;    /* memcpy */
+#ifdef DEVFS_MUST_DIE
+       sd_gendisks[i].de_arr = kmalloc (SCSI_DISKS_PER_MAJOR * 
+                                        sizeof *sd_gendisks[i].de_arr,
+                                        GFP_ATOMIC);
+       if (!sd_gendisks[i].de_arr)
+           goto cleanup_gendisks_de_arr;
+       memset (sd_gendisks[i].de_arr, 0,
+               SCSI_DISKS_PER_MAJOR * sizeof *sd_gendisks[i].de_arr);
+#endif
+       sd_gendisks[i].flags = kmalloc (SCSI_DISKS_PER_MAJOR * 
+                                       sizeof *sd_gendisks[i].flags,
+                                       GFP_ATOMIC);
+       if (!sd_gendisks[i].flags)
+           goto cleanup_gendisks_flags;
+       memset (sd_gendisks[i].flags, 0,
+               SCSI_DISKS_PER_MAJOR * sizeof *sd_gendisks[i].flags);
+       sd_gendisks[i].major = SD_MAJOR(i);
+       sd_gendisks[i].major_name = "sd";
+       sd_gendisks[i].minor_shift = 4;
+       sd_gendisks[i].max_p = 1 << 4;
+       sd_gendisks[i].part = kmalloc((SCSI_DISKS_PER_MAJOR << 4) * 
+                                     sizeof(struct hd_struct),
+                                     GFP_ATOMIC);
+       if (!sd_gendisks[i].part)
+           goto cleanup_gendisks_part;
+       memset(sd_gendisks[i].part, 0, (SCSI_DISKS_PER_MAJOR << 4) * 
+              sizeof(struct hd_struct));
+       sd_gendisks[i].sizes = sd_sizes + (i * SCSI_DISKS_PER_MAJOR << 4);
+       sd_gendisks[i].nr_real = 0;
+       sd_gendisks[i].real_devices =
+           (void *) (rscsi_disks + i * SCSI_DISKS_PER_MAJOR);
+    }
 
-       return 0;
+    return 0;
 
-cleanup_gendisks_part:
-       kfree(sd_gendisks[i].flags);
-cleanup_gendisks_flags:
+ cleanup_gendisks_part:
+    kfree(sd_gendisks[i].flags);
+ cleanup_gendisks_flags:
+#ifdef DEVFS_MUST_DIE
+    kfree(sd_gendisks[i].de_arr);
+ cleanup_gendisks_de_arr:
+#endif
+    while (--i >= 0 ) {
+#ifdef DEVFS_MUST_DIE
        kfree(sd_gendisks[i].de_arr);
-cleanup_gendisks_de_arr:
-       while (--i >= 0 ) {
-               kfree(sd_gendisks[i].de_arr);
-               kfree(sd_gendisks[i].flags);
-               kfree(sd_gendisks[i].part);
-       }
-       kfree(sd_gendisks);
-       sd_gendisks = NULL;
-cleanup_sd_gendisks:
-       kfree(sd_max_sectors);
-cleanup_max_sectors:
-       kfree(sd_hardsizes);
-cleanup_blocksizes:
-       kfree(sd_blocksizes);
-cleanup_sizes:
-       kfree(sd_sizes);
-cleanup_disks:
-       kfree(rscsi_disks);
-       rscsi_disks = NULL;
-cleanup_devfs:
-       for (i = 0; i < N_USED_SD_MAJORS; i++) {
-               devfs_unregister_blkdev(SD_MAJOR(i), "sd");
-       }
-       sd_registered--;
-       sd_template.dev_noticed = 0;
-       return 1;
+#endif
+       kfree(sd_gendisks[i].flags);
+       kfree(sd_gendisks[i].part);
+    }
+    kfree(sd_gendisks);
+    sd_gendisks = NULL;
+ cleanup_sd_gendisks:
+    kfree(sd_max_sectors);
+ cleanup_max_sectors:
+    kfree(sd_hardsizes);
+ cleanup_blocksizes:
+    kfree(sd_blocksizes);
+ cleanup_sizes:
+    kfree(sd_sizes);
+ cleanup_disks:
+    kfree(rscsi_disks);
+    rscsi_disks = NULL;
+ cleanup_devfs:
+#ifdef DEVFS_MUST_DIE
+    for (i = 0; i < N_USED_SD_MAJORS; i++) {
+       devfs_unregister_blkdev(SD_MAJOR(i), "sd");
+    }
+#endif
+    sd_registered--;
+    sd_template.dev_noticed = 0;
+    return 1;
 }
 
 
 static void sd_finish()
 {
-       int i;
-
-       for (i = 0; i < N_USED_SD_MAJORS; i++) {
-               blk_dev[SD_MAJOR(i)].queue = sd_find_queue;
-               add_gendisk(&sd_gendisks[i]);
-       }
+    int i;
 
-       for (i = 0; i < sd_template.dev_max; ++i)
-               if (!rscsi_disks[i].capacity && rscsi_disks[i].device) {
-                       sd_init_onedisk(i);
-                       if (!rscsi_disks[i].has_part_table) {
-                               sd_sizes[i << 4] = rscsi_disks[i].capacity;
-                               register_disk(&SD_GENDISK(i), MKDEV_SD(i),
-                                               1<<4, &sd_fops,
-                                               rscsi_disks[i].capacity);
-                               rscsi_disks[i].has_part_table = 1;
-                       }
-               }
-       /* If our host adapter is capable of scatter-gather, then we increase
-        * the read-ahead to 60 blocks (120 sectors).  If not, we use
-        * a two block (4 sector) read ahead. We can only respect this with the
-        * granularity of every 16 disks (one device major).
-        */
-       for (i = 0; i < N_USED_SD_MAJORS; i++) {
-               read_ahead[SD_MAJOR(i)] =
-                   (rscsi_disks[i * SCSI_DISKS_PER_MAJOR].device
-                    && rscsi_disks[i * SCSI_DISKS_PER_MAJOR].device->host->sg_tablesize)
-                   ? 120       /* 120 sector read-ahead */
-                   : 4;        /* 4 sector read-ahead */
+    for (i = 0; i < N_USED_SD_MAJORS; i++) {
+       blk_dev[SD_MAJOR(i)].queue = sd_find_queue;
+       add_gendisk(&sd_gendisks[i]);
+    }
+
+    for (i = 0; i < sd_template.dev_max; ++i)
+       if (!rscsi_disks[i].capacity && rscsi_disks[i].device) {
+           sd_init_onedisk(i);
+           if (!rscsi_disks[i].has_part_table) {
+               sd_sizes[i << 4] = rscsi_disks[i].capacity;
+               register_disk(&SD_GENDISK(i), MKDEV_SD(i),
+                             1<<4, &sd_fops,
+                             rscsi_disks[i].capacity);
+               rscsi_disks[i].has_part_table = 1;
+           }
        }
+#if 0 
+    /* If our host adapter is capable of scatter-gather, then we increase
+     * the read-ahead to 60 blocks (120 sectors).  If not, we use
+     * a two block (4 sector) read ahead. We can only respect this with the
+     * granularity of every 16 disks (one device major).
+     */
+    for (i = 0; i < N_USED_SD_MAJORS; i++) {
+       read_ahead[SD_MAJOR(i)] =
+           (rscsi_disks[i * SCSI_DISKS_PER_MAJOR].device
+            && rscsi_disks[i * SCSI_DISKS_PER_MAJOR].device->host->sg_tablesize)
+           ? 120       /* 120 sector read-ahead */
+           : 4;        /* 4 sector read-ahead */
+    }
+#endif
 
-       return;
+    return;
 }
 
 static int sd_detect(Scsi_Device * SDp)
 {
-       if (SDp->type != TYPE_DISK && SDp->type != TYPE_MOD)
-               return 0;
-       sd_template.dev_noticed++;
-       return 1;
+    if (SDp->type != TYPE_DISK && SDp->type != TYPE_MOD)
+       return 0;
+    sd_template.dev_noticed++;
+    return 1;
 }
 
 static int sd_attach(Scsi_Device * SDp)
 {
-        unsigned int devnum;
-       Scsi_Disk *dpnt;
-       int i;
-       char nbuff[6];
+    unsigned int devnum;
+    Scsi_Disk *dpnt;
+    int i;
+    char nbuff[6];
 
-       if (SDp->type != TYPE_DISK && SDp->type != TYPE_MOD)
-               return 0;
+    if (SDp->type != TYPE_DISK && SDp->type != TYPE_MOD)
+       return 0;
 
-       if (sd_template.nr_dev >= sd_template.dev_max || rscsi_disks == NULL) {
-               SDp->attached--;
-               return 1;
-       }
-       for (dpnt = rscsi_disks, i = 0; i < sd_template.dev_max; i++, dpnt++)
-               if (!dpnt->device)
-                       break;
-
-       if (i >= sd_template.dev_max) {
-               printk(KERN_WARNING "scsi_devices corrupt (sd),"
-                   " nr_dev %d dev_max %d\n",
-                   sd_template.nr_dev, sd_template.dev_max);
-               SDp->attached--;
-               return 1;
-       }
+    if (sd_template.nr_dev >= sd_template.dev_max || rscsi_disks == NULL) {
+       SDp->attached--;
+       return 1;
+    }
+    for (dpnt = rscsi_disks, i = 0; i < sd_template.dev_max; i++, dpnt++)
+       if (!dpnt->device)
+           break;
 
-       rscsi_disks[i].device = SDp;
-       rscsi_disks[i].has_part_table = 0;
-       sd_template.nr_dev++;
-       SD_GENDISK(i).nr_real++;
-        devnum = i % SCSI_DISKS_PER_MAJOR;
-        SD_GENDISK(i).de_arr[devnum] = SDp->de;
-        if (SDp->removable)
-               SD_GENDISK(i).flags[devnum] |= GENHD_FL_REMOVABLE;
-       sd_devname(i, nbuff);
-       printk("Attached scsi %sdisk %s at scsi%d, channel %d, id %d, lun %d\n",
-              SDp->removable ? "removable " : "",
-              nbuff, SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
-       return 0;
+    if (i >= sd_template.dev_max) {
+       printk(KERN_WARNING "scsi_devices corrupt (sd),"
+              " nr_dev %d dev_max %d\n",
+              sd_template.nr_dev, sd_template.dev_max);
+       SDp->attached--;
+       return 1;
+    }
+
+    rscsi_disks[i].device = SDp;
+    rscsi_disks[i].has_part_table = 0;
+    sd_template.nr_dev++;
+    SD_GENDISK(i).nr_real++;
+    devnum = i % SCSI_DISKS_PER_MAJOR;
+#ifdef DEVFS_MUST_DIE
+    SD_GENDISK(i).de_arr[devnum] = SDp->de;
+#endif
+    if (SDp->removable)
+       SD_GENDISK(i).flags[devnum] |= GENHD_FL_REMOVABLE;
+    sd_devname(i, nbuff);
+    printk("Attached scsi %sdisk %s at scsi%d, channel %d, id %d, lun %d\n",
+          SDp->removable ? "removable " : "",
+          nbuff, SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+    return 0;
 }
 
 #define DEVICE_BUSY rscsi_disks[target].device->busy
@@ -1324,137 +1365,146 @@ static int sd_attach(Scsi_Device * SDp)
  */
 int revalidate_scsidisk(kdev_t dev, int maxusage)
 {
-       struct gendisk *sdgd;
-       int target;
-       int max_p;
-       int start;
-       int i;
+    struct gendisk *sdgd;
+    int target;
+    int max_p;
+    int start;
+    int i;
 
-       target = DEVICE_NR(dev);
+    target = DEVICE_NR(dev);
 
-       if (DEVICE_BUSY || (ALLOW_REVALIDATE == 0 && USAGE > maxusage)) {
-               printk("Device busy for revalidation (usage=%d)\n", USAGE);
-               return -EBUSY;
-       }
-       DEVICE_BUSY = 1;
+    if (DEVICE_BUSY || (ALLOW_REVALIDATE == 0 && USAGE > maxusage)) {
+       printk("Device busy for revalidation (usage=%d)\n", USAGE);
+       return -EBUSY;
+    }
+    DEVICE_BUSY = 1;
 
-       sdgd = &SD_GENDISK(target);
-       max_p = sd_gendisk.max_p;
-       start = target << sd_gendisk.minor_shift;
+    sdgd = &SD_GENDISK(target);
+    max_p = sd_gendisk.max_p;
+    start = target << sd_gendisk.minor_shift;
 
-       for (i = max_p - 1; i >= 0; i--) {
-               int index = start + i;
-               invalidate_device(MKDEV_SD_PARTITION(index), 1);
-               sdgd->part[SD_MINOR_NUMBER(index)].start_sect = 0;
-               sdgd->part[SD_MINOR_NUMBER(index)].nr_sects = 0;
-               /*
-                * Reset the blocksize for everything so that we can read
-                * the partition table.  Technically we will determine the
-                * correct block size when we revalidate, but we do this just
-                * to make sure that everything remains consistent.
-                */
-               sd_blocksizes[index] = 1024;
-               if (rscsi_disks[target].device->sector_size == 2048)
-                       sd_blocksizes[index] = 2048;
-               else
-                       sd_blocksizes[index] = 1024;
-       }
+    for (i = max_p - 1; i >= 0; i--) {
+       int index = start + i;
+       invalidate_device(MKDEV_SD_PARTITION(index), 1);
+       sdgd->part[SD_MINOR_NUMBER(index)].start_sect = 0;
+       sdgd->part[SD_MINOR_NUMBER(index)].nr_sects = 0;
+       /*
+        * Reset the blocksize for everything so that we can read
+        * the partition table.  Technically we will determine the
+        * correct block size when we revalidate, but we do this just
+        * to make sure that everything remains consistent.
+        */
+       sd_blocksizes[index] = 1024;
+       if (rscsi_disks[target].device->sector_size == 2048)
+           sd_blocksizes[index] = 2048;
+       else
+           sd_blocksizes[index] = 1024;
+    }
 
 #ifdef MAYBE_REINIT
-       MAYBE_REINIT;
+    MAYBE_REINIT;
 #endif
 
-       grok_partitions(&SD_GENDISK(target), target % SCSI_DISKS_PER_MAJOR,
-                       1<<4, CAPACITY);
+    grok_partitions(&SD_GENDISK(target), target % SCSI_DISKS_PER_MAJOR,
+                   1<<4, CAPACITY);
 
-       DEVICE_BUSY = 0;
-       return 0;
+    DEVICE_BUSY = 0;
+    return 0;
 }
 
 static int fop_revalidate_scsidisk(kdev_t dev)
 {
-       return revalidate_scsidisk(dev, 0);
+    return revalidate_scsidisk(dev, 0);
 }
+
 static void sd_detach(Scsi_Device * SDp)
 {
-       Scsi_Disk *dpnt;
-       struct gendisk *sdgd;
-       int i, j;
-       int max_p;
-       int start;
-
-       if (rscsi_disks == NULL)
-               return;
-
-       for (dpnt = rscsi_disks, i = 0; i < sd_template.dev_max; i++, dpnt++)
-               if (dpnt->device == SDp) {
-
-                       /* If we are disconnecting a disk driver, sync and invalidate
-                        * everything */
-                       sdgd = &SD_GENDISK(i);
-                       max_p = sd_gendisk.max_p;
-                       start = i << sd_gendisk.minor_shift;
-
-                       for (j = max_p - 1; j >= 0; j--) {
-                               int index = start + j;
-                               invalidate_device(MKDEV_SD_PARTITION(index), 1);
-                               sdgd->part[SD_MINOR_NUMBER(index)].start_sect = 0;
-                               sdgd->part[SD_MINOR_NUMBER(index)].nr_sects = 0;
-                               sd_sizes[index] = 0;
-                       }
-                        devfs_register_partitions (sdgd,
-                                                   SD_MINOR_NUMBER (start), 1);
-                       /* unregister_disk() */
-                       dpnt->has_part_table = 0;
-                       dpnt->device = NULL;
-                       dpnt->capacity = 0;
-                       SDp->attached--;
-                       sd_template.dev_noticed--;
-                       sd_template.nr_dev--;
-                       SD_GENDISK(i).nr_real--;
-                       return;
-               }
+    Scsi_Disk *dpnt;
+    struct gendisk *sdgd;
+    int i, j;
+    int max_p;
+    int start;
+    
+    if (rscsi_disks == NULL)
        return;
+    
+    for (dpnt = rscsi_disks, i = 0; i < sd_template.dev_max; i++, dpnt++)
+       if (dpnt->device == SDp) {
+           
+           /* If we are disconnecting a disk driver, sync and invalidate
+            * everything */
+           sdgd = &SD_GENDISK(i);
+           max_p = sd_gendisk.max_p;
+           start = i << sd_gendisk.minor_shift;
+           
+           for (j = max_p - 1; j >= 0; j--) {
+               int index = start + j;
+               invalidate_device(MKDEV_SD_PARTITION(index), 1);
+               sdgd->part[SD_MINOR_NUMBER(index)].start_sect = 0;
+               sdgd->part[SD_MINOR_NUMBER(index)].nr_sects = 0;
+               sd_sizes[index] = 0;
+           }
+#ifdef DEVFS_MUST_DIE
+           devfs_register_partitions (sdgd,
+                                      SD_MINOR_NUMBER (start), 1);
+#endif
+           /* unregister_disk() */
+           dpnt->has_part_table = 0;
+           dpnt->device = NULL;
+           dpnt->capacity = 0;
+           SDp->attached--;
+           sd_template.dev_noticed--;
+           sd_template.nr_dev--;
+           SD_GENDISK(i).nr_real--;
+           return;
+       }
+    return;
 }
 
 static int __init init_sd(void)
 {
-       sd_template.module = THIS_MODULE;
-       return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
+    sd_template.module = THIS_MODULE;
+    return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
 }
 
 static void __exit exit_sd(void)
 {
-       int i;
-
-       scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
-
-       for (i = 0; i < N_USED_SD_MAJORS; i++)
-               devfs_unregister_blkdev(SD_MAJOR(i), "sd");
-
-       sd_registered--;
-       if (rscsi_disks != NULL) {
-               kfree(rscsi_disks);
-               kfree(sd_sizes);
-               kfree(sd_blocksizes);
-               kfree(sd_hardsizes);
-               for (i = 0; i < N_USED_SD_MAJORS; i++) {
-#if 0 /* XXX aren't we forgetting to deallocate something? */
-                       kfree(sd_gendisks[i].de_arr);
-                       kfree(sd_gendisks[i].flags);
+    int i;
+    
+#if 0
+    scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
 #endif
-                       kfree(sd_gendisks[i].part);
-               }
-       }
+    
+#ifdef DEVFS_MUST_DIE
+    for (i = 0; i < N_USED_SD_MAJORS; i++)
+       devfs_unregister_blkdev(SD_MAJOR(i), "sd");
+#endif
+    
+    sd_registered--;
+    if (rscsi_disks != NULL) {
+       kfree(rscsi_disks);
+       kfree(sd_sizes);
+       kfree(sd_blocksizes);
+       kfree(sd_hardsizes);
        for (i = 0; i < N_USED_SD_MAJORS; i++) {
-               del_gendisk(&sd_gendisks[i]);
-               blk_size[SD_MAJOR(i)] = NULL;   /* XXX blksize_size actually? */
-               hardsect_size[SD_MAJOR(i)] = NULL;
-               read_ahead[SD_MAJOR(i)] = 0;
+#if 0 /* XXX aren't we forgetting to deallocate something? */
+           kfree(sd_gendisks[i].de_arr);
+           kfree(sd_gendisks[i].flags);
+#endif
+           kfree(sd_gendisks[i].part);
        }
-       sd_template.dev_max = 0;
-       if (sd_gendisks != NULL)    /* kfree tests for 0, but leave explicit */
-               kfree(sd_gendisks);
+    }
+    for (i = 0; i < N_USED_SD_MAJORS; i++) {
+       del_gendisk(&sd_gendisks[i]);
+       blk_size[SD_MAJOR(i)] = NULL;   /* XXX blksize_size actually? */
+       hardsect_size[SD_MAJOR(i)] = NULL;
+#if 0
+       read_ahead[SD_MAJOR(i)] = 0;
+#endif
+    }
+    sd_template.dev_max = 0;
+    if (sd_gendisks != NULL)    /* kfree tests for 0, but leave explicit */
+       kfree(sd_gendisks);
 }
 
 module_init(init_sd);
diff --git a/xen-2.4.16/include/asm-i386/dma.h b/xen-2.4.16/include/asm-i386/dma.h
new file mode 100644 (file)
index 0000000..f24c90a
--- /dev/null
@@ -0,0 +1,301 @@
+/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <linux/config.h>
+#include <linux/spinlock.h>    /* And spinlocks */
+#include <asm/io.h>            /* need byte IO */
+#include <linux/delay.h>
+
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb       outb_p
+#else
+#define dma_outb       outb
+#endif
+
+#define dma_inb                inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ *  controller 1: channels 0-3, byte operations, ports 00-1F
+ *  controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ *  - ALL registers are 8 bits only, regardless of transfer size
+ *  - channel 4 is not used - cascades 1 into 2.
+ *  - channels 0-3 are byte - addresses/counts are for physical bytes
+ *  - channels 5-7 are word - addresses/counts are for physical words
+ *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ *  - transfer count loaded to registers is 1 less than actual count
+ *  - controller 2 offsets are all even (2x offsets for controller 1)
+ *  - page registers for 5-7 don't use data bit 0, represent 128K pages
+ *  - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.  
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ *  Address mapping for channels 0-3:
+ *
+ *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
+ *    |  ...  |   |  ... |   |  ... |
+ *    |  ...  |   |  ... |   |  ... |
+ *    |  ...  |   |  ... |   |  ... |
+ *   P7  ...  P0  A7 ... A0  A7 ... A0   
+ * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
+ *
+ *  Address mapping for channels 5-7:
+ *
+ *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
+ *    |  ...  |   \   \   ... \  \  \  ... \  \
+ *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
+ *    |  ...  |     \   \   ... \  \  \  ... \
+ *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0   
+ * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
+ *
+ */
+
+#define MAX_DMA_CHANNELS       8
+
+#if 0
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS      (PAGE_OFFSET+0x1000000)
+#endif
+
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE   0x00    /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE   0xC0    /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG           0x08    /* command register (w) */
+#define DMA1_STAT_REG          0x08    /* status register (r) */
+#define DMA1_REQ_REG            0x09    /* request register (w) */
+#define DMA1_MASK_REG          0x0A    /* single-channel mask (w) */
+#define DMA1_MODE_REG          0x0B    /* mode register (w) */
+#define DMA1_CLEAR_FF_REG      0x0C    /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG           0x0D    /* Temporary Register (r) */
+#define DMA1_RESET_REG         0x0D    /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
+#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */
+
+#define DMA2_CMD_REG           0xD0    /* command register (w) */
+#define DMA2_STAT_REG          0xD0    /* status register (r) */
+#define DMA2_REQ_REG            0xD2    /* request register (w) */
+#define DMA2_MASK_REG          0xD4    /* single-channel mask (w) */
+#define DMA2_MODE_REG          0xD6    /* mode register (w) */
+#define DMA2_CLEAR_FF_REG      0xD8    /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG           0xDA    /* Temporary Register (r) */
+#define DMA2_RESET_REG         0xDA    /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
+#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */
+
+#define DMA_ADDR_0              0x00    /* DMA address registers */
+#define DMA_ADDR_1              0x02
+#define DMA_ADDR_2              0x04
+#define DMA_ADDR_3              0x06
+#define DMA_ADDR_4              0xC0
+#define DMA_ADDR_5              0xC4
+#define DMA_ADDR_6              0xC8
+#define DMA_ADDR_7              0xCC
+
+#define DMA_CNT_0               0x01    /* DMA count registers */
+#define DMA_CNT_1               0x03
+#define DMA_CNT_2               0x05
+#define DMA_CNT_3               0x07
+#define DMA_CNT_4               0xC2
+#define DMA_CNT_5               0xC6
+#define DMA_CNT_6               0xCA
+#define DMA_CNT_7               0xCE
+
+#define DMA_PAGE_0              0x87    /* DMA page registers */
+#define DMA_PAGE_1              0x83
+#define DMA_PAGE_2              0x81
+#define DMA_PAGE_3              0x82
+#define DMA_PAGE_5              0x8B
+#define DMA_PAGE_6              0x89
+#define DMA_PAGE_7              0x8A
+
+#define DMA_MODE_READ  0x44    /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48    /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0   /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT   0x10
+
+
+extern spinlock_t  dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&dma_spin_lock, flags);
+       return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+       spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+       if (dmanr<=3)
+               dma_outb(dmanr,  DMA1_MASK_REG);
+       else
+               dma_outb(dmanr & 3,  DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+       if (dmanr<=3)
+               dma_outb(dmanr | 4,  DMA1_MASK_REG);
+       else
+               dma_outb((dmanr & 3) | 4,  DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while holding the DMA lock ! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+       if (dmanr<=3)
+               dma_outb(0,  DMA1_CLEAR_FF_REG);
+       else
+               dma_outb(0,  DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+       if (dmanr<=3)
+               dma_outb(mode | dmanr,  DMA1_MODE_REG);
+       else
+               dma_outb(mode | (dmanr&3),  DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+{
+       switch(dmanr) {
+               case 0:
+                       dma_outb(pagenr, DMA_PAGE_0);
+                       break;
+               case 1:
+                       dma_outb(pagenr, DMA_PAGE_1);
+                       break;
+               case 2:
+                       dma_outb(pagenr, DMA_PAGE_2);
+                       break;
+               case 3:
+                       dma_outb(pagenr, DMA_PAGE_3);
+                       break;
+               case 5:
+                       dma_outb(pagenr & 0xfe, DMA_PAGE_5);
+                       break;
+               case 6:
+                       dma_outb(pagenr & 0xfe, DMA_PAGE_6);
+                       break;
+               case 7:
+                       dma_outb(pagenr & 0xfe, DMA_PAGE_7);
+                       break;
+       }
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+       set_dma_page(dmanr, a>>16);
+       if (dmanr <= 3)  {
+           dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+            dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+       }  else  {
+           dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+           dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+       }
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+        count--;
+       if (dmanr <= 3)  {
+           dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+           dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+        } else {
+           dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+           dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+        }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+       unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+                                        : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+       /* using short to get 16-bit wrap around */
+       unsigned short count;
+
+       count = 1 + dma_inb(io_port);
+       count += dma_inb(io_port) << 8;
+       
+       return (dmanr<=3)? count : (count<<1);
+}
+
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id);    /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr);      /* release it again */
+
+/* From PCI */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy   (0)
+#endif
+
+#endif /* _ASM_DMA_H */
index 0102f3e205cf17d8ffb1e11fd68bbc2b1e1f8ac2..81778b6d93d7ad082e448afa4082a0a0fce67d07 100644 (file)
@@ -58,7 +58,8 @@ enum
        HI_SOFTIRQ=0,
        NET_TX_SOFTIRQ,
        NET_RX_SOFTIRQ,
-       TASKLET_SOFTIRQ
+       TASKLET_SOFTIRQ, 
+       SCSI_LOW_SOFTIRQ,
 };
 
 /* softirq mask and active fields moved to irq_cpustat_t in